1 //===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visit functions for load, store and alloca.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/MapVector.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/AliasAnalysis.h"
18 #include "llvm/Analysis/Loads.h"
19 #include "llvm/IR/ConstantRange.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/DebugInfoMetadata.h"
22 #include "llvm/IR/IntrinsicInst.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/MDBuilder.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/Transforms/InstCombine/InstCombiner.h"
27 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
28 #include "llvm/Transforms/Utils/Local.h"
29 using namespace llvm;
30 using namespace PatternMatch;
31
32 #define DEBUG_TYPE "instcombine"
33
34 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
35 STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
36
37 /// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
38 /// pointer to an alloca. Ignore any reads of the pointer, return false if we
39 /// see any stores or other unknown uses. If we see pointer arithmetic, keep
40 /// track of whether it moves the pointer (with IsOffset) but otherwise traverse
41 /// the uses. If we see a memcpy/memmove that targets an unoffset pointer to
42 /// the alloca, and if the source pointer is a pointer to a constant global, we
43 /// can optimize this.
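///
/// Illustrative shape (hypothetical names, assuming typed-pointer IR): the
/// alloca is written only by a memcpy from a constant global and is otherwise
/// just read, so it qualifies:
///   %buf = alloca %struct.S
///   %dst = bitcast %struct.S* %buf to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst,
///            i8* bitcast (%struct.S* @const_init to i8*), i64 16, i1 false)
///   %fld = getelementptr inbounds %struct.S, %struct.S* %buf, i64 0, i32 0
///   %v   = load i32, i32* %fld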
44 static bool
45 isOnlyCopiedFromConstantMemory(AAResults *AA,
46 Value *V, MemTransferInst *&TheCopy,
47 SmallVectorImpl<Instruction *> &ToDelete) {
48 // We track lifetime intrinsics as we encounter them. If we decide to go
49 // ahead and replace the value with the global, this lets the caller quickly
50 // eliminate the markers.
51
52 SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
53 ValuesToInspect.emplace_back(V, false);
54 while (!ValuesToInspect.empty()) {
55 auto ValuePair = ValuesToInspect.pop_back_val();
56 const bool IsOffset = ValuePair.second;
57 for (auto &U : ValuePair.first->uses()) {
58 auto *I = cast<Instruction>(U.getUser());
59
60 if (auto *LI = dyn_cast<LoadInst>(I)) {
61 // Ignore non-volatile loads, they are always ok.
62 if (!LI->isSimple()) return false;
63 continue;
64 }
65
66 if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
67 // If uses of the bitcast are ok, we are ok.
68 ValuesToInspect.emplace_back(I, IsOffset);
69 continue;
70 }
71 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
72 // If the GEP has all zero indices, it doesn't offset the pointer; otherwise
73 // it does.
74 ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
75 continue;
76 }
77
78 if (auto *Call = dyn_cast<CallBase>(I)) {
79 // If this is the function being called then we treat it like a load and
80 // ignore it.
81 if (Call->isCallee(&U))
82 continue;
83
84 unsigned DataOpNo = Call->getDataOperandNo(&U);
85 bool IsArgOperand = Call->isArgOperand(&U);
86
87 // Inalloca arguments are clobbered by the call.
88 if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
89 return false;
90
91 // If this is a readonly/readnone call site, then we know it is just a
92 // load (but one that potentially returns the value itself), so we can
93 // ignore it if we know that the value isn't captured.
94 if (Call->onlyReadsMemory() &&
95 (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
96 continue;
97
98 // If this is being passed as a byval argument, the caller is making a
99 // copy, so it is only a read of the alloca.
100 if (IsArgOperand && Call->isByValArgument(DataOpNo))
101 continue;
102 }
103
104 // Lifetime intrinsics can be handled by the caller.
105 if (I->isLifetimeStartOrEnd()) {
106 assert(I->use_empty() && "Lifetime markers have no result to use!");
107 ToDelete.push_back(I);
108 continue;
109 }
110
111 // If this isn't our memcpy/memmove, reject it as something we can't
112 // handle.
113 MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
114 if (!MI)
115 return false;
116
117 // If the transfer is using the alloca as a source of the transfer, then
118 // ignore it since it is a load (unless the transfer is volatile).
119 if (U.getOperandNo() == 1) {
120 if (MI->isVolatile()) return false;
121 continue;
122 }
123
124 // If we already have seen a copy, reject the second one.
125 if (TheCopy) return false;
126
127 // If the pointer has been offset from the start of the alloca, we can't
128 // safely handle this.
129 if (IsOffset) return false;
130
131 // If the memintrinsic isn't using the alloca as the dest, reject it.
132 if (U.getOperandNo() != 0) return false;
133
134 // If the source of the memcpy/move is not a constant global, reject it.
135 if (!AA->pointsToConstantMemory(MI->getSource()))
136 return false;
137
138 // Otherwise, the transform is safe. Remember the copy instruction.
139 TheCopy = MI;
140 }
141 }
142 return true;
143 }
144
145 /// isOnlyCopiedFromConstantMemory - Return the copy if the specified alloca is
146 /// only modified by a copy from a constant global. If we can prove this, we can
147 /// replace any uses of the alloca with uses of the global directly.
148 static MemTransferInst *
149 isOnlyCopiedFromConstantMemory(AAResults *AA,
150 AllocaInst *AI,
151 SmallVectorImpl<Instruction *> &ToDelete) {
152 MemTransferInst *TheCopy = nullptr;
153 if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
154 return TheCopy;
155 return nullptr;
156 }
157
158 /// Returns true if V is dereferenceable for the size of the alloca.
159 static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
160 const DataLayout &DL) {
161 if (AI->isArrayAllocation())
162 return false;
163 uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
164 if (!AllocaSize)
165 return false;
166 return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
167 APInt(64, AllocaSize), DL);
168 }
169
170 static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
171 AllocaInst &AI) {
172 // Check for array size of 1 (scalar allocation).
173 if (!AI.isArrayAllocation()) {
174 // i32 1 is the canonical array size for scalar allocations.
175 if (AI.getArraySize()->getType()->isIntegerTy(32))
176 return nullptr;
177
178 // Canonicalize it.
179 return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
180 }
181
182 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
183 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
184 if (C->getValue().getActiveBits() <= 64) {
185 Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
186 AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
187 New->setAlignment(AI.getAlign());
188
189 // Scan to the end of the allocation instructions, to skip over a block of
190 // allocas if possible...also skip interleaved debug info
191 //
192 BasicBlock::iterator It(New);
193 while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
194 ++It;
195
196 // Now that It is pointing to the first non-allocation-inst in the block,
197 // insert our getelementptr instruction...
198 //
199 Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
200 Value *NullIdx = Constant::getNullValue(IdxTy);
201 Value *Idx[2] = {NullIdx, NullIdx};
202 Instruction *NewI = GetElementPtrInst::CreateInBounds(
203 NewTy, New, Idx, New->getName() + ".sub");
204 IC.InsertNewInstBefore(NewI, *It);
205
206 // Gracefully handle allocas in other address spaces.
207 if (AI.getType()->getPointerAddressSpace() !=
208 NewI->getType()->getPointerAddressSpace()) {
209 NewI =
210 CastInst::CreatePointerBitCastOrAddrSpaceCast(NewI, AI.getType());
211 IC.InsertNewInstBefore(NewI, *It);
212 }
213
214 // Now make everything use the getelementptr instead of the original
215 // allocation.
216 return IC.replaceInstUsesWith(AI, NewI);
217 }
218 }
219
220 if (isa<UndefValue>(AI.getArraySize()))
221 return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
222
223 // Ensure that the alloca array size argument has type intptr_t, so that
224 // any casting is exposed early.
225 Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
226 if (AI.getArraySize()->getType() != IntPtrTy) {
227 Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
228 return IC.replaceOperand(AI, 0, V);
229 }
230
231 return nullptr;
232 }
233
234 namespace {
235 // If I and V are pointers in different address spaces, it is not allowed to
236 // use replaceAllUsesWith since I and V have different types. A
237 // non-target-specific transformation should not use addrspacecast on V since
238 // the two address spaces may be disjoint depending on the target.
239 //
240 // This class chases down uses of the old pointer until reaching the load
241 // instructions, then replaces the old pointer in the load instructions with
242 // the new pointer. If during the chasing it sees bitcast or GEP, it will
243 // create new bitcast or GEP with the new pointer and use them in the load
244 // instruction.
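//
// For example (illustrative, hypothetical address spaces), when the original
// pointer is replaced by a pointer into addrspace(4), a use chain such as
//   %gep = getelementptr inbounds i8, i8* %orig, i64 4
//   %v   = load i8, i8* %gep
// is rebuilt against the new pointer as
//   %gep.new = getelementptr inbounds i8, i8 addrspace(4)* %new, i64 4
//   %v       = load i8, i8 addrspace(4)* %gep.new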
245 class PointerReplacer {
246 public:
247   PointerReplacer(InstCombinerImpl &IC) : IC(IC) {}
248
249 bool collectUsers(Instruction &I);
250 void replacePointer(Instruction &I, Value *V);
251
252 private:
253 void replace(Instruction *I);
254 Value *getReplacement(Value *I);
255
256 SmallSetVector<Instruction *, 4> Worklist;
257 MapVector<Value *, Value *> WorkMap;
258 InstCombinerImpl &IC;
259 };
260 } // end anonymous namespace
261
262 bool PointerReplacer::collectUsers(Instruction &I) {
263 for (auto U : I.users()) {
264 auto *Inst = cast<Instruction>(&*U);
265 if (auto *Load = dyn_cast<LoadInst>(Inst)) {
266 if (Load->isVolatile())
267 return false;
268 Worklist.insert(Load);
269 } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
270 Worklist.insert(Inst);
271 if (!collectUsers(*Inst))
272 return false;
273 } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
274 if (MI->isVolatile())
275 return false;
276 Worklist.insert(Inst);
277 } else if (Inst->isLifetimeStartOrEnd()) {
278 continue;
279 } else {
280 LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
281 return false;
282 }
283 }
284
285 return true;
286 }
287
288 Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }
289
290 void PointerReplacer::replace(Instruction *I) {
291 if (getReplacement(I))
292 return;
293
294 if (auto *LT = dyn_cast<LoadInst>(I)) {
295 auto *V = getReplacement(LT->getPointerOperand());
296 assert(V && "Operand not replaced");
297 auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
298 LT->getAlign(), LT->getOrdering(),
299 LT->getSyncScopeID());
300 NewI->takeName(LT);
301 copyMetadataForLoad(*NewI, *LT);
302
303 IC.InsertNewInstWith(NewI, *LT);
304 IC.replaceInstUsesWith(*LT, NewI);
305 WorkMap[LT] = NewI;
306 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
307 auto *V = getReplacement(GEP->getPointerOperand());
308 assert(V && "Operand not replaced");
309 SmallVector<Value *, 8> Indices;
310 Indices.append(GEP->idx_begin(), GEP->idx_end());
311 auto *NewI = GetElementPtrInst::Create(
312 V->getType()->getPointerElementType(), V, Indices);
313 IC.InsertNewInstWith(NewI, *GEP);
314 NewI->takeName(GEP);
315 WorkMap[GEP] = NewI;
316 } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
317 auto *V = getReplacement(BC->getOperand(0));
318 assert(V && "Operand not replaced");
319 auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
320 V->getType()->getPointerAddressSpace());
321 auto *NewI = new BitCastInst(V, NewT);
322 IC.InsertNewInstWith(NewI, *BC);
323 NewI->takeName(BC);
324 WorkMap[BC] = NewI;
325 } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
326 auto *SrcV = getReplacement(MemCpy->getRawSource());
327 // The pointer may appear in the destination of a copy, but we don't want to
328 // replace it.
329 if (!SrcV) {
330 assert(getReplacement(MemCpy->getRawDest()) &&
331 "destination not in replace list");
332 return;
333 }
334
335 IC.Builder.SetInsertPoint(MemCpy);
336 auto *NewI = IC.Builder.CreateMemTransferInst(
337 MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
338 SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
339 MemCpy->isVolatile());
340 AAMDNodes AAMD = MemCpy->getAAMetadata();
341 if (AAMD)
342 NewI->setAAMetadata(AAMD);
343
344 IC.eraseInstFromFunction(*MemCpy);
345 WorkMap[MemCpy] = NewI;
346 } else {
347 llvm_unreachable("should never reach here");
348 }
349 }
350
351 void PointerReplacer::replacePointer(Instruction &I, Value *V) {
352 #ifndef NDEBUG
353 auto *PT = cast<PointerType>(I.getType());
354 auto *NT = cast<PointerType>(V->getType());
355 assert(PT != NT && PT->getElementType() == NT->getElementType() &&
356 "Invalid usage");
357 #endif
358 WorkMap[&I] = V;
359
360 for (Instruction *Workitem : Worklist)
361 replace(Workitem);
362 }
363
364 Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
365 if (auto *I = simplifyAllocaArraySize(*this, AI))
366 return I;
367
368 if (AI.getAllocatedType()->isSized()) {
369 // Move all alloca's of zero byte objects to the entry block and merge them
370 // together. Note that we only do this for alloca's, because malloc should
371 // allocate and return a unique pointer, even for a zero byte allocation.
372 if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinSize() == 0) {
373 // For a zero sized alloca there is no point in doing an array allocation.
374 // This is helpful if the array size is a complicated expression not used
375 // elsewhere.
376 if (AI.isArrayAllocation())
377 return replaceOperand(AI, 0,
378 ConstantInt::get(AI.getArraySize()->getType(), 1));
379
380 // Get the first instruction in the entry block.
381 BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
382 Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
383 if (FirstInst != &AI) {
384 // If the entry block doesn't start with a zero-size alloca then move
385 // this one to the start of the entry block. There is no problem with
386 // dominance as the array size was forced to a constant earlier already.
387 AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
388 if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
389 DL.getTypeAllocSize(EntryAI->getAllocatedType())
390 .getKnownMinSize() != 0) {
391 AI.moveBefore(FirstInst);
392 return &AI;
393 }
394
395 // Replace this zero-sized alloca with the one at the start of the entry
396 // block after ensuring that the address will be aligned enough for both
397 // types.
398 const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
399 EntryAI->setAlignment(MaxAlign);
400 if (AI.getType() != EntryAI->getType())
401 return new BitCastInst(EntryAI, AI.getType());
402 return replaceInstUsesWith(AI, EntryAI);
403 }
404 }
405 }
406
407 // Check to see if this allocation is only modified by a memcpy/memmove from
408 // a constant whose alignment is equal to or exceeds that of the allocation.
409 // If this is the case, we can change all users to use the constant global
410 // instead. This is commonly produced by the CFE by constructs like "void
411 // foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' is only subsequently
412 // read.
413 SmallVector<Instruction *, 4> ToDelete;
414 if (MemTransferInst *Copy = isOnlyCopiedFromConstantMemory(AA, &AI, ToDelete)) {
415 Value *TheSrc = Copy->getSource();
416 Align AllocaAlign = AI.getAlign();
417 Align SourceAlign = getOrEnforceKnownAlignment(
418 TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
419 if (AllocaAlign <= SourceAlign &&
420 isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
421 !isa<Instruction>(TheSrc)) {
422 // FIXME: Can we sink instructions without violating dominance when TheSrc
423 // is an instruction instead of a constant or argument?
424 LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
425 LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
426 unsigned SrcAddrSpace = TheSrc->getType()->getPointerAddressSpace();
427 auto *DestTy = PointerType::get(AI.getAllocatedType(), SrcAddrSpace);
428 if (AI.getType()->getAddressSpace() == SrcAddrSpace) {
429 for (Instruction *Delete : ToDelete)
430 eraseInstFromFunction(*Delete);
431
432 Value *Cast = Builder.CreateBitCast(TheSrc, DestTy);
433 Instruction *NewI = replaceInstUsesWith(AI, Cast);
434 eraseInstFromFunction(*Copy);
435 ++NumGlobalCopies;
436 return NewI;
437 }
438
439 PointerReplacer PtrReplacer(*this);
440 if (PtrReplacer.collectUsers(AI)) {
441 for (Instruction *Delete : ToDelete)
442 eraseInstFromFunction(*Delete);
443
444 Value *Cast = Builder.CreateBitCast(TheSrc, DestTy);
445 PtrReplacer.replacePointer(AI, Cast);
446 ++NumGlobalCopies;
447 }
448 }
449 }
450
451 // At last, use the generic allocation site handler to aggressively remove
452 // unused allocas.
453 return visitAllocSite(AI);
454 }
455
456 // Are we allowed to form an atomic load or store of this type?
457 static bool isSupportedAtomicType(Type *Ty) {
458 return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
459 }
460
461 /// Helper to combine a load to a new type.
462 ///
463 /// This just does the work of combining a load to a new type. It handles
464 /// metadata, etc., and returns the new instruction. The \c NewTy should be the
465 /// loaded *value* type. This will convert it to a pointer, cast the operand to
466 /// that pointer type, load it, etc.
467 ///
468 /// Note that this will create all of the instructions with whatever insert
469 /// point the \c InstCombinerImpl currently is using.
470 LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
471 const Twine &Suffix) {
472 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
473 "can't fold an atomic load to requested type");
474
475 Value *Ptr = LI.getPointerOperand();
476 unsigned AS = LI.getPointerAddressSpace();
477 Type *NewPtrTy = NewTy->getPointerTo(AS);
478 Value *NewPtr = nullptr;
479 if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
480 NewPtr->getType() == NewPtrTy))
481 NewPtr = Builder.CreateBitCast(Ptr, NewPtrTy);
482
483 LoadInst *NewLoad = Builder.CreateAlignedLoad(
484 NewTy, NewPtr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
485 NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
486 copyMetadataForLoad(*NewLoad, LI);
487 return NewLoad;
488 }
489
490 /// Combine a store to a new type.
491 ///
492 /// Returns the newly created store instruction.
493 static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
494 Value *V) {
495 assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
496 "can't fold an atomic store of requested type");
497
498 Value *Ptr = SI.getPointerOperand();
499 unsigned AS = SI.getPointerAddressSpace();
500 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
501 SI.getAllMetadata(MD);
502
503 StoreInst *NewStore = IC.Builder.CreateAlignedStore(
504 V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
505 SI.getAlign(), SI.isVolatile());
506 NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
507 for (const auto &MDPair : MD) {
508 unsigned ID = MDPair.first;
509 MDNode *N = MDPair.second;
510 // Note, essentially every kind of metadata should be preserved here! This
511 // routine is supposed to clone a store instruction changing *only its
512 // type*. The only metadata it makes sense to drop is metadata which is
513 // invalidated when the pointer type changes. This should essentially
514 // never be the case in LLVM, but we explicitly switch over only known
515 // metadata to be conservatively correct. If you are adding metadata to
516 // LLVM which pertains to stores, you almost certainly want to add it
517 // here.
518 switch (ID) {
519 case LLVMContext::MD_dbg:
520 case LLVMContext::MD_tbaa:
521 case LLVMContext::MD_prof:
522 case LLVMContext::MD_fpmath:
523 case LLVMContext::MD_tbaa_struct:
524 case LLVMContext::MD_alias_scope:
525 case LLVMContext::MD_noalias:
526 case LLVMContext::MD_nontemporal:
527 case LLVMContext::MD_mem_parallel_loop_access:
528 case LLVMContext::MD_access_group:
529 // All of these directly apply.
530 NewStore->setMetadata(ID, N);
531 break;
532 case LLVMContext::MD_invariant_load:
533 case LLVMContext::MD_nonnull:
534 case LLVMContext::MD_noundef:
535 case LLVMContext::MD_range:
536 case LLVMContext::MD_align:
537 case LLVMContext::MD_dereferenceable:
538 case LLVMContext::MD_dereferenceable_or_null:
539 // These don't apply for stores.
540 break;
541 }
542 }
543
544 return NewStore;
545 }
546
547 /// Returns true if the instruction represents a minmax pattern like:
548 /// select ((cmp load V1, load V2), V1, V2).
549 static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
550 assert(V->getType()->isPointerTy() && "Expected pointer type.");
551 // Ignore possible ty* to ixx* bitcast.
552 V = InstCombiner::peekThroughBitcast(V);
553 // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
554 // pattern.
555 CmpInst::Predicate Pred;
556 Instruction *L1;
557 Instruction *L2;
558 Value *LHS;
559 Value *RHS;
560 if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
561 m_Value(LHS), m_Value(RHS))))
562 return false;
563 LoadTy = L1->getType();
564 return (match(L1, m_Load(m_Specific(LHS))) &&
565 match(L2, m_Load(m_Specific(RHS)))) ||
566 (match(L1, m_Load(m_Specific(RHS))) &&
567 match(L2, m_Load(m_Specific(LHS))));
568 }
569
570 /// Combine loads to match the type of their uses' value after looking
571 /// through intervening bitcasts.
572 ///
573 /// The core idea here is that if the result of a load is used in an operation,
574 /// we should load the type most conducive to that operation. For example, when
575 /// loading an integer and converting that immediately to a pointer, we should
576 /// instead directly load a pointer.
577 ///
578 /// However, this routine must never change the width of a load or the number of
579 /// loads as that would introduce a semantic change. This combine is expected to
580 /// be a semantic no-op which just allows loads to more closely model the types
581 /// of their consuming operations.
582 ///
583 /// Currently, we also refuse to change the precise type used for an atomic load
584 /// or a volatile load. This is debatable, and might be reasonable to change
585 /// later. However, it is risky in case some backend or other part of LLVM is
586 /// relying on the exact type loaded to select appropriate atomic operations.
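///
/// Illustrative sketch (hypothetical names, typed-pointer IR): a load whose
/// single user is a no-op bitcast of the loaded value,
///   %l = load i32*, i32** %p
///   %b = bitcast i32* %l to i8*
/// is rewritten to load the destination type directly through a recast pointer:
///   %q = bitcast i32** %p to i8**
///   %l = load i8*, i8** %q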
587 static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
588 LoadInst &LI) {
589 // FIXME: We could probably with some care handle both volatile and ordered
590 // atomic loads here but it isn't clear that this is important.
591 if (!LI.isUnordered())
592 return nullptr;
593
594 if (LI.use_empty())
595 return nullptr;
596
597 // swifterror values can't be bitcasted.
598 if (LI.getPointerOperand()->isSwiftError())
599 return nullptr;
600
601 const DataLayout &DL = IC.getDataLayout();
602
603 // Fold away bit casts of the loaded value by loading the desired type.
604 // Note that we should not do this for pointer<->integer casts,
605 // because that would result in type punning.
606 if (LI.hasOneUse()) {
607 // Don't transform when the type is x86_amx; it keeps the pass that lowers
608 // the x86_amx type happy.
609 if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
610 assert(!LI.getType()->isX86_AMXTy() &&
611 "load from x86_amx* should not happen!");
612 if (BC->getType()->isX86_AMXTy())
613 return nullptr;
614 }
615
616 if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
617 if (CI->isNoopCast(DL) && LI.getType()->isPtrOrPtrVectorTy() ==
618 CI->getDestTy()->isPtrOrPtrVectorTy())
619 if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
620 LoadInst *NewLoad = IC.combineLoadToNewType(LI, CI->getDestTy());
621 CI->replaceAllUsesWith(NewLoad);
622 IC.eraseInstFromFunction(*CI);
623 return &LI;
624 }
625 }
626
627 // FIXME: We should also canonicalize loads of vectors when their elements are
628 // cast to other types.
629 return nullptr;
630 }
631
632 static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
633 // FIXME: We could probably with some care handle both volatile and atomic
634 // stores here but it isn't clear that this is important.
635 if (!LI.isSimple())
636 return nullptr;
637
638 Type *T = LI.getType();
639 if (!T->isAggregateType())
640 return nullptr;
641
642 StringRef Name = LI.getName();
643 assert(LI.getAlignment() && "Alignment must be set at this point");
644
645 if (auto *ST = dyn_cast<StructType>(T)) {
646 // If the struct only has one element, we unpack.
647 auto NumElements = ST->getNumElements();
648 if (NumElements == 1) {
649 LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
650 ".unpack");
651 NewLoad->setAAMetadata(LI.getAAMetadata());
652 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
653 UndefValue::get(T), NewLoad, 0, Name));
654 }
655
656 // We don't want to break loads with padding here as we'd lose
657 // the knowledge that padding exists for the rest of the pipeline.
658 const DataLayout &DL = IC.getDataLayout();
659 auto *SL = DL.getStructLayout(ST);
660 if (SL->hasPadding())
661 return nullptr;
662
663 const auto Align = LI.getAlign();
664 auto *Addr = LI.getPointerOperand();
665 auto *IdxType = Type::getInt32Ty(T->getContext());
666 auto *Zero = ConstantInt::get(IdxType, 0);
667
668 Value *V = UndefValue::get(T);
669 for (unsigned i = 0; i < NumElements; i++) {
670 Value *Indices[2] = {
671 Zero,
672 ConstantInt::get(IdxType, i),
673 };
674 auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
675 Name + ".elt");
676 auto *L = IC.Builder.CreateAlignedLoad(
677 ST->getElementType(i), Ptr,
678 commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
679 // Propagate AA metadata. It'll still be valid on the narrowed load.
680 L->setAAMetadata(LI.getAAMetadata());
681 V = IC.Builder.CreateInsertValue(V, L, i);
682 }
683
684 V->setName(Name);
685 return IC.replaceInstUsesWith(LI, V);
686 }
687
688 if (auto *AT = dyn_cast<ArrayType>(T)) {
689 auto *ET = AT->getElementType();
690 auto NumElements = AT->getNumElements();
691 if (NumElements == 1) {
692 LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
693 NewLoad->setAAMetadata(LI.getAAMetadata());
694 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
695 UndefValue::get(T), NewLoad, 0, Name));
696 }
697
698 // Bail out if the array is too large. Ideally we would like to optimize
699 // arrays of arbitrary size but this has a terrible impact on compile time.
700 // The threshold here is chosen arbitrarily, maybe needs a little bit of
701 // tuning.
702 if (NumElements > IC.MaxArraySizeForCombine)
703 return nullptr;
704
705 const DataLayout &DL = IC.getDataLayout();
706 auto EltSize = DL.getTypeAllocSize(ET);
707 const auto Align = LI.getAlign();
708
709 auto *Addr = LI.getPointerOperand();
710 auto *IdxType = Type::getInt64Ty(T->getContext());
711 auto *Zero = ConstantInt::get(IdxType, 0);
712
713 Value *V = UndefValue::get(T);
714 uint64_t Offset = 0;
715 for (uint64_t i = 0; i < NumElements; i++) {
716 Value *Indices[2] = {
717 Zero,
718 ConstantInt::get(IdxType, i),
719 };
720 auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
721 Name + ".elt");
722 auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
723 commonAlignment(Align, Offset),
724 Name + ".unpack");
725 L->setAAMetadata(LI.getAAMetadata());
726 V = IC.Builder.CreateInsertValue(V, L, i);
727 Offset += EltSize;
728 }
729
730 V->setName(Name);
731 return IC.replaceInstUsesWith(LI, V);
732 }
733
734 return nullptr;
735 }
736
737 // If we can determine that all possible objects pointed to by the provided
738 // pointer value are, not only dereferenceable, but also definitively less than
739 // or equal to the provided maximum size, then return true. Otherwise, return
740 // false. Constant global values and allocas are the cases we can prove this for.
741 //
742 // FIXME: This should probably live in ValueTracking (or similar).
743 static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
744 const DataLayout &DL) {
745 SmallPtrSet<Value *, 4> Visited;
746 SmallVector<Value *, 4> Worklist(1, V);
747
748 do {
749 Value *P = Worklist.pop_back_val();
750 P = P->stripPointerCasts();
751
752 if (!Visited.insert(P).second)
753 continue;
754
755 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
756 Worklist.push_back(SI->getTrueValue());
757 Worklist.push_back(SI->getFalseValue());
758 continue;
759 }
760
761 if (PHINode *PN = dyn_cast<PHINode>(P)) {
762 append_range(Worklist, PN->incoming_values());
763 continue;
764 }
765
766 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
767 if (GA->isInterposable())
768 return false;
769 Worklist.push_back(GA->getAliasee());
770 continue;
771 }
772
773 // If we know how big this object is, and it is less than MaxSize, continue
774 // searching. Otherwise, return false.
775 if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
776 if (!AI->getAllocatedType()->isSized())
777 return false;
778
779 ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
780 if (!CS)
781 return false;
782
783 uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
784 // Make sure that, even if the multiplication below would wrap as an
785 // uint64_t, we still do the right thing.
786 if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
787 return false;
788 continue;
789 }
790
791 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
792 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
793 return false;
794
795 uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
796 if (InitSize > MaxSize)
797 return false;
798 continue;
799 }
800
801 return false;
802 } while (!Worklist.empty());
803
804 return true;
805 }
806
807 // If we're indexing into an object of a known size, and the outer index is
808 // not a constant, but having any value but zero would lead to undefined
809 // behavior, replace it with zero.
810 //
811 // For example, if we have:
812 // @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
813 // ...
814 // %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
815 // ... = load i32* %arrayidx, align 4
816 // Then we know that we can replace %x in the GEP with i64 0.
817 //
818 // FIXME: We could fold any GEP index to zero that would cause UB if it were
819 // not zero. Currently, we only handle the first such index. Also, we could
820 // also search through non-zero constant indices if we kept track of the
821 // offsets those indices implied.
822 static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
823 GetElementPtrInst *GEPI, Instruction *MemI,
824 unsigned &Idx) {
825 if (GEPI->getNumOperands() < 2)
826 return false;
827
828 // Find the first non-zero index of a GEP. If all indices are zero, return
829 // one past the last index.
830 auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
831 unsigned I = 1;
832 for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
833 Value *V = GEPI->getOperand(I);
834 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
835 if (CI->isZero())
836 continue;
837
838 break;
839 }
840
841 return I;
842 };
843
844 // Skip through initial 'zero' indices, and find the corresponding pointer
845 // type. See if the next index is not a constant.
846 Idx = FirstNZIdx(GEPI);
847 if (Idx == GEPI->getNumOperands())
848 return false;
849 if (isa<Constant>(GEPI->getOperand(Idx)))
850 return false;
851
852 SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
853 Type *SourceElementType = GEPI->getSourceElementType();
854 // Size information about scalable vectors is not available, so we cannot
855 // deduce whether indexing at n is undefined behaviour or not. Bail out.
856 if (isa<ScalableVectorType>(SourceElementType))
857 return false;
858
859 Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
860 if (!AllocTy || !AllocTy->isSized())
861 return false;
862 const DataLayout &DL = IC.getDataLayout();
863 uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedSize();
864
865 // If there are more indices after the one we might replace with a zero, make
866 // sure they're all non-negative. If any of them are negative, the overall
867 // address being computed might be before the base address determined by the
868 // first non-zero index.
869 auto IsAllNonNegative = [&]() {
870 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
871 KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
872 if (Known.isNonNegative())
873 continue;
874 return false;
875 }
876
877 return true;
878 };
879
880 // FIXME: If the GEP is not inbounds, and there are extra indices after the
881 // one we'll replace, those could cause the address computation to wrap
882 // (rendering the IsAllNonNegative() check below insufficient). We can do
883 // better, ignoring zero indices (and other indices we can prove small
884 // enough not to wrap).
885 if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
886 return false;
887
888 // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
889 // also known to be dereferenceable.
890 return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
891 IsAllNonNegative();
892 }
893
894 // If we're indexing into an object with a variable index for the memory
895 // access, but the object has only one element, we can assume that the index
896 // will always be zero. If we replace the GEP, return it.
897 template <typename T>
898 static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
899 T &MemI) {
900 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
901 unsigned Idx;
902 if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
903 Instruction *NewGEPI = GEPI->clone();
904 NewGEPI->setOperand(Idx,
905 ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
906 NewGEPI->insertBefore(GEPI);
907 MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
908 return NewGEPI;
909 }
910 }
911
912 return nullptr;
913 }
914
915 static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
916 if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
917 return false;
918
919 auto *Ptr = SI.getPointerOperand();
920 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
921 Ptr = GEPI->getOperand(0);
922 return (isa<ConstantPointerNull>(Ptr) &&
923 !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
924 }
925
926 static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
927 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
928 const Value *GEPI0 = GEPI->getOperand(0);
929 if (isa<ConstantPointerNull>(GEPI0) &&
930 !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
931 return true;
932 }
933 if (isa<UndefValue>(Op) ||
934 (isa<ConstantPointerNull>(Op) &&
935 !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
936 return true;
937 return false;
938 }
939
940 Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
941 Value *Op = LI.getOperand(0);
942
943 // Try to canonicalize the loaded type.
944 if (Instruction *Res = combineLoadToOperationType(*this, LI))
945 return Res;
946
947 // Attempt to improve the alignment.
948 Align KnownAlign = getOrEnforceKnownAlignment(
949 Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
950 if (KnownAlign > LI.getAlign())
951 LI.setAlignment(KnownAlign);
952
953 // Replace GEP indices if possible.
954 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
955 Worklist.push(NewGEPI);
956 return &LI;
957 }
958
959 if (Instruction *Res = unpackLoadToAggregate(*this, LI))
960 return Res;
961
962 // Do really simple store-to-load forwarding and load CSE, to catch cases
963 // where there are several consecutive memory accesses to the same location,
964 // separated by a few arithmetic operations.
965 bool IsLoadCSE = false;
966 if (Value *AvailableVal = FindAvailableLoadedValue(&LI, *AA, &IsLoadCSE)) {
967 if (IsLoadCSE)
968 combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);
969
970 return replaceInstUsesWith(
971 LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
972 LI.getName() + ".cast"));
973 }
974
975 // None of the following transforms are legal for volatile/ordered atomic
976 // loads. Most of them do apply for unordered atomics.
977 if (!LI.isUnordered()) return nullptr;
978
979 // load(gep null, ...) -> unreachable
980 // load null/undef -> unreachable
981 // TODO: Consider a target hook for valid address spaces for this xforms.
982 if (canSimplifyNullLoadOrGEP(LI, Op)) {
983 // Insert a new store to null instruction before the load to indicate
984 // that this code is not reachable. We do this instead of inserting
985 // an unreachable instruction directly because we cannot modify the
986 // CFG.
987 StoreInst *SI = new StoreInst(PoisonValue::get(LI.getType()),
988 Constant::getNullValue(Op->getType()), &LI);
989 SI->setDebugLoc(LI.getDebugLoc());
990 return replaceInstUsesWith(LI, PoisonValue::get(LI.getType()));
991 }
992
993 if (Op->hasOneUse()) {
994 // Change select and PHI nodes to select values instead of addresses: this
995 // helps alias analysis out a lot, allows many other simplifications, and
996 // exposes redundancy in the code.
997 //
998 // Note that we cannot do the transformation unless we know that the
999 // introduced loads cannot trap! Something like this is valid as long as
1000 // the condition is always false: load (select bool %C, int* null, int* %G),
1001 // but it would not be valid if we transformed it to load from null
1002 // unconditionally.
1003 //
1004 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
1005 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
1006 Align Alignment = LI.getAlign();
1007 if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
1008 Alignment, DL, SI) &&
1009 isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
1010 Alignment, DL, SI)) {
1011 LoadInst *V1 =
1012 Builder.CreateLoad(LI.getType(), SI->getOperand(1),
1013 SI->getOperand(1)->getName() + ".val");
1014 LoadInst *V2 =
1015 Builder.CreateLoad(LI.getType(), SI->getOperand(2),
1016 SI->getOperand(2)->getName() + ".val");
1017 assert(LI.isUnordered() && "implied by above");
1018 V1->setAlignment(Alignment);
1019 V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1020 V2->setAlignment(Alignment);
1021 V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1022 return SelectInst::Create(SI->getCondition(), V1, V2);
1023 }
1024
1025 // load (select (cond, null, P)) -> load P
1026 if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
1027 !NullPointerIsDefined(SI->getFunction(),
1028 LI.getPointerAddressSpace()))
1029 return replaceOperand(LI, 0, SI->getOperand(2));
1030
1031 // load (select (cond, P, null)) -> load P
1032 if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
1033 !NullPointerIsDefined(SI->getFunction(),
1034 LI.getPointerAddressSpace()))
1035 return replaceOperand(LI, 0, SI->getOperand(1));
1036 }
1037 }
1038 return nullptr;
1039 }
1040
1041 /// Look for extractelement/insertvalue sequence that acts like a bitcast.
1042 ///
1043 /// \returns underlying value that was "cast", or nullptr otherwise.
1044 ///
1045 /// For example, if we have:
1046 ///
1047 /// %E0 = extractelement <2 x double> %U, i32 0
1048 /// %V0 = insertvalue [2 x double] undef, double %E0, 0
1049 /// %E1 = extractelement <2 x double> %U, i32 1
1050 /// %V1 = insertvalue [2 x double] %V0, double %E1, 1
1051 ///
1052 /// and the layout of a <2 x double> is isomorphic to a [2 x double],
1053 /// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
1054 /// Note that %U may contain non-undef values where %V1 has undef.
1055 static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
1056 Value *U = nullptr;
1057 while (auto *IV = dyn_cast<InsertValueInst>(V)) {
1058 auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
1059 if (!E)
1060 return nullptr;
1061 auto *W = E->getVectorOperand();
1062 if (!U)
1063 U = W;
1064 else if (U != W)
1065 return nullptr;
1066 auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
1067 if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
1068 return nullptr;
1069 V = IV->getAggregateOperand();
1070 }
1071 if (!match(V, m_Undef()) || !U)
1072 return nullptr;
1073
1074 auto *UT = cast<VectorType>(U->getType());
1075 auto *VT = V->getType();
1076 // Check that types UT and VT are bitwise isomorphic.
1077 const auto &DL = IC.getDataLayout();
1078 if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
1079 return nullptr;
1080 }
1081 if (auto *AT = dyn_cast<ArrayType>(VT)) {
1082 if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1083 return nullptr;
1084 } else {
1085 auto *ST = cast<StructType>(VT);
1086 if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1087 return nullptr;
1088 for (const auto *EltT : ST->elements()) {
1089 if (EltT != UT->getElementType())
1090 return nullptr;
1091 }
1092 }
1093 return U;
1094 }
1095
1096 /// Combine stores to match the type of value being stored.
1097 ///
1098 /// The core idea here is that the memory does not have any intrinsic type and
1099 /// where we can we should match the type of a store to the type of value being
1100 /// stored.
1101 ///
1102 /// However, this routine must never change the width of a store or the number of
1103 /// stores as that would introduce a semantic change. This combine is expected to
1104 /// be a semantic no-op which just allows stores to more closely model the types
1105 /// of their incoming values.
1106 ///
1107 /// Currently, we also refuse to change the precise type used for an atomic or
1108 /// volatile store. This is debatable, and might be reasonable to change later.
1109 /// However, it is risky in case some backend or other part of LLVM is relying
1110 /// on the exact type stored to select appropriate atomic operations.
1111 ///
1112 /// \returns true if the store was successfully combined away. This indicates
1113 /// the caller must erase the store instruction. We have to let the caller erase
1114 /// the store instruction as otherwise there is no way to signal whether it was
1115 /// combined or not: IC.EraseInstFromFunction returns a null pointer.
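///
/// Illustrative sketch (hypothetical names, typed-pointer IR): a store of a
/// bitcast value,
///   %f = bitcast i32 %v to float
///   store float %f, float* %p
/// becomes a store of the original value through a recast pointer:
///   %q = bitcast float* %p to i32*
///   store i32 %v, i32* %q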
1116 static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
1117 // FIXME: We could probably with some care handle both volatile and ordered
1118 // atomic stores here but it isn't clear that this is important.
1119 if (!SI.isUnordered())
1120 return false;
1121
1122 // swifterror values can't be bitcasted.
1123 if (SI.getPointerOperand()->isSwiftError())
1124 return false;
1125
1126 Value *V = SI.getValueOperand();
1127
1128 // Fold away bit casts of the stored value by storing the original type.
1129 if (auto *BC = dyn_cast<BitCastInst>(V)) {
1130 assert(!BC->getType()->isX86_AMXTy() &&
1131 "store to x86_amx* should not happen!");
1132 V = BC->getOperand(0);
1133 // Don't transform when the type is x86_amx; it keeps the pass that lowers
1134 // the x86_amx type happy.
1135 if (V->getType()->isX86_AMXTy())
1136 return false;
1137 if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
1138 combineStoreToNewValue(IC, SI, V);
1139 return true;
1140 }
1141 }
1142
1143 if (Value *U = likeBitCastFromVector(IC, V))
1144 if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
1145 combineStoreToNewValue(IC, SI, U);
1146 return true;
1147 }
1148
1149 // FIXME: We should also canonicalize stores of vectors when their elements
1150 // are cast to other types.
1151 return false;
1152 }
1153
1154 static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
1155 // FIXME: We could probably with some care handle both volatile and atomic
1156 // stores here but it isn't clear that this is important.
1157 if (!SI.isSimple())
1158 return false;
1159
1160 Value *V = SI.getValueOperand();
1161 Type *T = V->getType();
1162
1163 if (!T->isAggregateType())
1164 return false;
1165
1166 if (auto *ST = dyn_cast<StructType>(T)) {
1167 // If the struct only has one element, we unpack.
1168 unsigned Count = ST->getNumElements();
1169 if (Count == 1) {
1170 V = IC.Builder.CreateExtractValue(V, 0);
1171 combineStoreToNewValue(IC, SI, V);
1172 return true;
1173 }
1174
1175 // We don't want to break loads with padding here as we'd lose
1176 // the knowledge that padding exists for the rest of the pipeline.
1177 const DataLayout &DL = IC.getDataLayout();
1178 auto *SL = DL.getStructLayout(ST);
1179 if (SL->hasPadding())
1180 return false;
1181
1182 const auto Align = SI.getAlign();
1183
1184 SmallString<16> EltName = V->getName();
1185 EltName += ".elt";
1186 auto *Addr = SI.getPointerOperand();
1187 SmallString<16> AddrName = Addr->getName();
1188 AddrName += ".repack";
1189
1190 auto *IdxType = Type::getInt32Ty(ST->getContext());
1191 auto *Zero = ConstantInt::get(IdxType, 0);
1192 for (unsigned i = 0; i < Count; i++) {
1193 Value *Indices[2] = {
1194 Zero,
1195 ConstantInt::get(IdxType, i),
1196 };
1197 auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
1198 AddrName);
1199 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1200 auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
1201 llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1202 NS->setAAMetadata(SI.getAAMetadata());
1203 }
1204
1205 return true;
1206 }
1207
1208 if (auto *AT = dyn_cast<ArrayType>(T)) {
1209 // If the array only has one element, we unpack.
1210 auto NumElements = AT->getNumElements();
1211 if (NumElements == 1) {
1212 V = IC.Builder.CreateExtractValue(V, 0);
1213 combineStoreToNewValue(IC, SI, V);
1214 return true;
1215 }
1216
1217 // Bail out if the array is too large. Ideally we would like to optimize
1218 // arrays of arbitrary size but this has a terrible impact on compile time.
1219 // The threshold here is chosen arbitrarily, maybe needs a little bit of
1220 // tuning.
1221 if (NumElements > IC.MaxArraySizeForCombine)
1222 return false;
1223
1224 const DataLayout &DL = IC.getDataLayout();
1225 auto EltSize = DL.getTypeAllocSize(AT->getElementType());
1226 const auto Align = SI.getAlign();
1227
1228 SmallString<16> EltName = V->getName();
1229 EltName += ".elt";
1230 auto *Addr = SI.getPointerOperand();
1231 SmallString<16> AddrName = Addr->getName();
1232 AddrName += ".repack";
1233
1234 auto *IdxType = Type::getInt64Ty(T->getContext());
1235 auto *Zero = ConstantInt::get(IdxType, 0);
1236
1237 uint64_t Offset = 0;
1238 for (uint64_t i = 0; i < NumElements; i++) {
1239 Value *Indices[2] = {
1240 Zero,
1241 ConstantInt::get(IdxType, i),
1242 };
1243 auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
1244 AddrName);
1245 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1246 auto EltAlign = commonAlignment(Align, Offset);
1247 Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1248 NS->setAAMetadata(SI.getAAMetadata());
1249 Offset += EltSize;
1250 }
1251
1252 return true;
1253 }
1254
1255 return false;
1256 }
1257
1258 /// equivalentAddressValues - Test if A and B will obviously have the same
1259 /// value. This includes recognizing that %t0 and %t1 will have the same
1260 /// value in code like this:
1261 /// %t0 = getelementptr \@a, 0, 3
1262 /// store i32 0, i32* %t0
1263 /// %t1 = getelementptr \@a, 0, 3
1264 /// %t2 = load i32* %t1
1265 ///
1266 static bool equivalentAddressValues(Value *A, Value *B) {
1267 // Test if the values are trivially equivalent.
1268 if (A == B) return true;
1269
1270 // Test if the values come from identical arithmetic instructions.
1271 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
1272 // it's only used to compare two uses within the same basic block, which
1273 // means that they'll always either have the same value or one of them
1274 // will have an undefined value.
1275 if (isa<BinaryOperator>(A) ||
1276 isa<CastInst>(A) ||
1277 isa<PHINode>(A) ||
1278 isa<GetElementPtrInst>(A))
1279 if (Instruction *BI = dyn_cast<Instruction>(B))
1280 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1281 return true;
1282
1283 // Otherwise they may not be equivalent.
1284 return false;
1285 }
1286
1287 /// Converts store (bitcast (load (bitcast (select ...)))) to
1288 /// store (load (select ...)), where select is minmax:
1289 /// select ((cmp load V1, load V2), V1, V2).
1290 static bool removeBitcastsFromLoadStoreOnMinMax(InstCombinerImpl &IC,
1291 StoreInst &SI) {
1292 // bitcast?
1293 if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
1294 return false;
1295 // load? integer?
1296 Value *LoadAddr;
1297 if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
1298 return false;
1299 auto *LI = cast<LoadInst>(SI.getValueOperand());
1300 if (!LI->getType()->isIntegerTy())
1301 return false;
1302 Type *CmpLoadTy;
1303 if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
1304 return false;
1305
1306 // Make sure the type would actually change.
1307 // This condition can be hit with chains of bitcasts.
1308 if (LI->getType() == CmpLoadTy)
1309 return false;
1310
1311 // Make sure we're not changing the size of the load/store.
1312 const auto &DL = IC.getDataLayout();
1313 if (DL.getTypeStoreSizeInBits(LI->getType()) !=
1314 DL.getTypeStoreSizeInBits(CmpLoadTy))
1315 return false;
1316
1317 if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
1318 auto *SI = dyn_cast<StoreInst>(U);
1319 return SI && SI->getPointerOperand() != LI &&
1320 InstCombiner::peekThroughBitcast(SI->getPointerOperand()) !=
1321 LoadAddr &&
1322 !SI->getPointerOperand()->isSwiftError();
1323 }))
1324 return false;
1325
1326 IC.Builder.SetInsertPoint(LI);
1327 LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
1328 // Replace all the stores with stores of the newly loaded value.
1329 for (auto *UI : LI->users()) {
1330 auto *USI = cast<StoreInst>(UI);
1331 IC.Builder.SetInsertPoint(USI);
1332 combineStoreToNewValue(IC, *USI, NewLI);
1333 }
1334 IC.replaceInstUsesWith(*LI, PoisonValue::get(LI->getType()));
1335 IC.eraseInstFromFunction(*LI);
1336 return true;
1337 }
1338
1339 Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
1340 Value *Val = SI.getOperand(0);
1341 Value *Ptr = SI.getOperand(1);
1342
1343 // Try to canonicalize the stored type.
1344 if (combineStoreToValueType(*this, SI))
1345 return eraseInstFromFunction(SI);
1346
1347 // Attempt to improve the alignment.
1348 const Align KnownAlign = getOrEnforceKnownAlignment(
1349 Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
1350 if (KnownAlign > SI.getAlign())
1351 SI.setAlignment(KnownAlign);
1352
1353 // Try to canonicalize the stored type.
1354 if (unpackStoreToAggregate(*this, SI))
1355 return eraseInstFromFunction(SI);
1356
1357 if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
1358 return eraseInstFromFunction(SI);
1359
1360 // Replace GEP indices if possible.
1361 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
1362 Worklist.push(NewGEPI);
1363 return &SI;
1364 }
1365
1366 // Don't hack volatile/ordered stores.
1367 // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1368 if (!SI.isUnordered()) return nullptr;
1369
1370 // If the RHS is an alloca with a single use, zapify the store, making the
1371 // alloca dead.
1372 if (Ptr->hasOneUse()) {
1373 if (isa<AllocaInst>(Ptr))
1374 return eraseInstFromFunction(SI);
1375 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1376 if (isa<AllocaInst>(GEP->getOperand(0))) {
1377 if (GEP->getOperand(0)->hasOneUse())
1378 return eraseInstFromFunction(SI);
1379 }
1380 }
1381 }
1382
1383 // If we have a store to a location which is known constant, we can conclude
1384 // that the store must be storing the constant value (else the memory
1385 // wouldn't be constant), and this must be a noop.
1386 if (AA->pointsToConstantMemory(Ptr))
1387 return eraseInstFromFunction(SI);
1388
1389 // Do really simple DSE, to catch cases where there are several consecutive
1390 // stores to the same location, separated by a few arithmetic operations. This
1391 // situation often occurs with bitfield accesses.
1392 BasicBlock::iterator BBI(SI);
1393 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1394 --ScanInsts) {
1395 --BBI;
1396 // Don't count debug info directives, lest they affect codegen,
1397 // and we skip pointer-to-pointer bitcasts, which are NOPs.
1398 if (BBI->isDebugOrPseudoInst() ||
1399 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1400 ScanInsts++;
1401 continue;
1402 }
1403
1404 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1405 // Prev store isn't volatile, and stores to the same location?
1406 if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
1407 SI.getOperand(1))) {
1408 ++NumDeadStore;
1409 // Manually add back the original store to the worklist now, so it will
1410 // be processed after the operands of the removed store, as this may
1411 // expose additional DSE opportunities.
1412 Worklist.push(&SI);
1413 eraseInstFromFunction(*PrevSI);
1414 return nullptr;
1415 }
1416 break;
1417 }
1418
1419 // If this is a load, we have to stop. However, if the loaded value is from
1420 // the pointer we're storing to and is itself the value being stored,
1421 // then *this* store is dead (X = load P; store X -> P).
1422 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1423 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1424 assert(SI.isUnordered() && "can't eliminate ordering operation");
1425 return eraseInstFromFunction(SI);
1426 }
1427
1428 // Otherwise, this is a load from some other location. Stores before it
1429 // may not be dead.
1430 break;
1431 }
1432
1433 // Don't skip over loads, throws or things that can modify memory.
1434 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
1435 break;
1436 }
1437
1438 // store X, null -> turns into 'unreachable' in SimplifyCFG
1439 // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
1440 if (canSimplifyNullStoreOrGEP(SI)) {
1441 if (!isa<PoisonValue>(Val))
1442 return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
1443 return nullptr; // Do not modify these!
1444 }
1445
1446 // store undef, Ptr -> noop
1447 if (isa<UndefValue>(Val))
1448 return eraseInstFromFunction(SI);
1449
1450 return nullptr;
1451 }
1452
1453 /// Try to transform:
1454 /// if () { *P = v1; } else { *P = v2 }
1455 /// or:
1456 /// *P = v1; if () { *P = v2; }
1457 /// into a phi node with a store in the successor.
1458 bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
1459 if (!SI.isUnordered())
1460 return false; // This code has not been audited for volatile/ordered case.
1461
1462 // Check if the successor block has exactly 2 incoming edges.
1463 BasicBlock *StoreBB = SI.getParent();
1464 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1465 if (!DestBB->hasNPredecessors(2))
1466 return false;
1467
1468 // Capture the other block (the block that doesn't contain our store).
1469 pred_iterator PredIter = pred_begin(DestBB);
1470 if (*PredIter == StoreBB)
1471 ++PredIter;
1472 BasicBlock *OtherBB = *PredIter;
1473
1474 // Bail out if the relevant blocks aren't all distinct. This can happen,
1475 // for example, if SI is in an infinite loop.
1476 if (StoreBB == DestBB || OtherBB == DestBB)
1477 return false;
1478
1479 // Verify that the other block ends in a branch and is not otherwise empty.
1480 BasicBlock::iterator BBI(OtherBB->getTerminator());
1481 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
1482 if (!OtherBr || BBI == OtherBB->begin())
1483 return false;
1484
1485 // If the other block ends in an unconditional branch, check for the 'if then
1486 // else' case. There is an instruction before the branch.
1487 StoreInst *OtherStore = nullptr;
1488 if (OtherBr->isUnconditional()) {
1489 --BBI;
1490 // Skip over debugging info.
1491 while (isa<DbgInfoIntrinsic>(BBI) ||
1492 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1493 if (BBI==OtherBB->begin())
1494 return false;
1495 --BBI;
1496 }
1497 // If this isn't a store, isn't a store to the same location, or is not the
1498 // right kind of store, bail out.
1499 OtherStore = dyn_cast<StoreInst>(BBI);
1500 if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
1501 !SI.isSameOperationAs(OtherStore))
1502 return false;
1503 } else {
1504 // Otherwise, the other block ended with a conditional branch. If one of the
1505 // destinations is StoreBB, then we have the if/then case.
1506 if (OtherBr->getSuccessor(0) != StoreBB &&
1507 OtherBr->getSuccessor(1) != StoreBB)
1508 return false;
1509
1510 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1511 // if/then triangle. See if there is a store to the same ptr as SI that
1512 // lives in OtherBB.
1513 for (;; --BBI) {
1514 // Check to see if we find the matching store.
1515 if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
1516 if (OtherStore->getOperand(1) != SI.getOperand(1) ||
1517 !SI.isSameOperationAs(OtherStore))
1518 return false;
1519 break;
1520 }
1521 // If we find something that may be using or overwriting the stored
1522 // value, or if we run out of instructions, we can't do the transform.
1523 if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
1524 BBI->mayWriteToMemory() || BBI == OtherBB->begin())
1525 return false;
1526 }
1527
1528 // In order to eliminate the store in OtherBr, we have to make sure nothing
1529 // reads or overwrites the stored value in StoreBB.
1530 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1531 // FIXME: This should really be AA driven.
1532 if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
1533 return false;
1534 }
1535 }
1536
1537 // Insert a PHI node now if we need it.
1538 Value *MergedVal = OtherStore->getOperand(0);
1539 // The debug locations of the original instructions might differ. Merge them.
1540 DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
1541 OtherStore->getDebugLoc());
1542 if (MergedVal != SI.getOperand(0)) {
1543 PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
1544 PN->addIncoming(SI.getOperand(0), SI.getParent());
1545 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
1546 MergedVal = InsertNewInstBefore(PN, DestBB->front());
1547 PN->setDebugLoc(MergedLoc);
1548 }
1549
1550 // Advance to a place where it is safe to insert the new store and insert it.
1551 BBI = DestBB->getFirstInsertionPt();
1552 StoreInst *NewSI =
1553 new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
1554 SI.getOrdering(), SI.getSyncScopeID());
1555 InsertNewInstBefore(NewSI, *BBI);
1556 NewSI->setDebugLoc(MergedLoc);
1557
1558 // If the two stores had AA tags, merge them.
1559 AAMDNodes AATags = SI.getAAMetadata();
1560 if (AATags)
1561 NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));
1562
1563 // Nuke the old stores.
1564 eraseInstFromFunction(SI);
1565 eraseInstFromFunction(*OtherStore);
1566 return true;
1567 }
1568