//===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges loads/stores to/from sequential memory addresses into vector
// loads/stores. Although there's nothing GPU-specific in here, this pass is
// motivated by the microarchitectural quirks of nVidia and AMD GPUs.
//
// (For simplicity below we talk about loads only, but everything also applies
// to stores.)
//
// This pass is intended to be run late in the pipeline, after other
// vectorization opportunities have been exploited. So the assumption here is
// that immediately following our new vector load we'll need to extract out the
// individual elements of the load, so we can operate on them individually.
//
// On CPUs this transformation is usually not beneficial, because extracting the
// elements of a vector register is expensive on most architectures. It's
// usually better just to load each element individually into its own scalar
// register.
//
// However, nVidia and AMD GPUs don't have proper vector registers. Instead, a
// "vector load" loads directly into a series of scalar registers. In effect,
// extracting the elements of the vector is free. It's therefore always
// beneficial to vectorize a sequence of loads on these architectures.
//
// Vectorizing (perhaps a better name might be "coalescing") loads can have
// large performance impacts on GPU kernels, and opportunities for vectorizing
// are common in GPU code. This pass tries very hard to find such
// opportunities; its runtime is quadratic in the number of loads in a BB.
//
// Some CPU architectures, such as ARM, have instructions that load into
// multiple scalar registers, similar to a GPU vectorized load. In theory ARM
// could use this pass (with some modifications), but currently it implements
// its own pass to do something similar to what we do here.

#include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "load-store-vectorizer"

STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");

// FIXME: Assuming stack alignment of 4 is always good enough
static const unsigned StackAdjustedAlignment = 4;

namespace {

/// ChainID is an arbitrary token that is allowed to be different only for the
/// accesses that are guaranteed to be considered non-consecutive by
/// Vectorizer::isConsecutiveAccess. It's used for grouping instructions
/// together and reducing the number of instructions the main search operates on
/// at a time, i.e. this is to reduce compile time and nothing else as the main
/// search has O(n^2) time complexity. The underlying type of ChainID should not
/// be relied upon.
using ChainID = const Value *;
using InstrList = SmallVector<Instruction *, 8>;
using InstrListMap = MapVector<ChainID, InstrList>;

class Vectorizer {
  Function &F;
  AliasAnalysis &AA;
  DominatorTree &DT;
  ScalarEvolution &SE;
  TargetTransformInfo &TTI;
  const DataLayout &DL;
  IRBuilder<> Builder;

public:
  Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
             ScalarEvolution &SE, TargetTransformInfo &TTI)
      : F(F), AA(AA), DT(DT), SE(SE), TTI(TTI),
        DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {}

  bool run();

private:
  unsigned getPointerAddressSpace(Value *I);

  static const unsigned MaxDepth = 3;

  bool isConsecutiveAccess(Value *A, Value *B);
  bool areConsecutivePointers(Value *PtrA, Value *PtrB, APInt PtrDelta,
                              unsigned Depth = 0) const;
  bool lookThroughComplexAddresses(Value *PtrA, Value *PtrB, APInt PtrDelta,
                                   unsigned Depth) const;
  bool lookThroughSelects(Value *PtrA, Value *PtrB, const APInt &PtrDelta,
                          unsigned Depth) const;

  /// After vectorization, reorder the instructions that I depends on
  /// (the instructions defining its operands), to ensure they dominate I.
  void reorder(Instruction *I);

  /// Returns the first and the last instructions in Chain.
  std::pair<BasicBlock::iterator, BasicBlock::iterator>
  getBoundaryInstrs(ArrayRef<Instruction *> Chain);

  /// Erases the original instructions after vectorizing.
  void eraseInstructions(ArrayRef<Instruction *> Chain);

  /// "Legalize" the vector type that would be produced by combining \p
  /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
  /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
  /// expected to have more than 4 elements.
  std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
  splitOddVectorElts(ArrayRef<Instruction *> Chain, unsigned ElementSizeBits);

  /// Finds the largest prefix of Chain that's vectorizable, checking for
  /// intervening instructions which may affect the memory accessed by the
  /// instructions within Chain.
  ///
  /// The elements of \p Chain must be all loads or all stores and must be in
  /// address order.
  ArrayRef<Instruction *> getVectorizablePrefix(ArrayRef<Instruction *> Chain);

  /// Collects load and store instructions to vectorize.
  std::pair<InstrListMap, InstrListMap> collectInstructions(BasicBlock *BB);

  /// Processes the collected instructions, the \p Map. The values of \p Map
  /// should be all loads or all stores.
  bool vectorizeChains(InstrListMap &Map);

  /// Finds the loads/stores to consecutive memory addresses and vectorizes
  /// them.
  bool vectorizeInstructions(ArrayRef<Instruction *> Instrs);

  /// Vectorizes the load instructions in Chain.
  bool
  vectorizeLoadChain(ArrayRef<Instruction *> Chain,
                     SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Vectorizes the store instructions in Chain.
  bool
  vectorizeStoreChain(ArrayRef<Instruction *> Chain,
                      SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Checks whether this load/store access is misaligned.
  bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                          unsigned Alignment);
};

class LoadStoreVectorizerLegacyPass : public FunctionPass {
public:
  static char ID;

  LoadStoreVectorizerLegacyPass() : FunctionPass(ID) {
    initializeLoadStoreVectorizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "GPU Load and Store Vectorizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

char LoadStoreVectorizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                      "Vectorize load and Store instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                    "Vectorize load and store instructions", false, false)

Pass *llvm::createLoadStoreVectorizerPass() {
  return new LoadStoreVectorizerLegacyPass();
}

bool LoadStoreVectorizerLegacyPass::runOnFunction(Function &F) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  Vectorizer V(F, AA, DT, SE, TTI);
  return V.run();
}

PreservedAnalyses LoadStoreVectorizerPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return PreservedAnalyses::all();

  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);

  Vectorizer V(F, AA, DT, SE, TTI);
  bool Changed = V.run();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return Changed ? PA : PreservedAnalyses::all();
}

// The real propagateMetadata expects a SmallVector<Value*>, but we deal in
// vectors of Instructions.
static void propagateMetadata(Instruction *I, ArrayRef<Instruction *> IL) {
  SmallVector<Value *, 8> VL(IL.begin(), IL.end());
  propagateMetadata(I, VL);
}

// Vectorizer Implementation
bool Vectorizer::run() {
  bool Changed = false;

  // Scan the blocks in the function in post order.
  for (BasicBlock *BB : post_order(&F)) {
    InstrListMap LoadRefs, StoreRefs;
    std::tie(LoadRefs, StoreRefs) = collectInstructions(BB);
    Changed |= vectorizeChains(LoadRefs);
    Changed |= vectorizeChains(StoreRefs);
  }

  return Changed;
}

unsigned Vectorizer::getPointerAddressSpace(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

// FIXME: Merge with llvm::isConsecutiveAccess
bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  unsigned ASA = getPointerAddressSpace(A);
  unsigned ASB = getPointerAddressSpace(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same size type.
  Type *PtrATy = PtrA->getType()->getPointerElementType();
  Type *PtrBTy = PtrB->getType()->getPointerElementType();
  if (PtrA == PtrB ||
      PtrATy->isVectorTy() != PtrBTy->isVectorTy() ||
      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
          DL.getTypeStoreSize(PtrBTy->getScalarType()))
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));

  return areConsecutivePointers(PtrA, PtrB, Size);
}

bool Vectorizer::areConsecutivePointers(Value *PtrA, Value *PtrB,
                                        APInt PtrDelta, unsigned Depth) const {
  unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(PtrA->getType());
  APInt OffsetA(PtrBitWidth, 0);
  APInt OffsetB(PtrBitWidth, 0);
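  // Strip constant, in-bounds GEP offsets from both pointers, accumulating
  // them into OffsetA/OffsetB, so the comparison below can work on the
  // remaining base pointers plus constant offsets.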
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  unsigned NewPtrBitWidth = DL.getTypeStoreSizeInBits(PtrA->getType());

  if (NewPtrBitWidth != DL.getTypeStoreSizeInBits(PtrB->getType()))
    return false;

  // If we had to shrink the pointer, stripAndAccumulateInBoundsConstantOffsets
  // should have handled any possible overflow, and the accumulated offsets
  // should fit into the smallest data type used in the cast/GEP chain.
  assert(OffsetA.getMinSignedBits() <= NewPtrBitWidth &&
         OffsetB.getMinSignedBits() <= NewPtrBitWidth);

  OffsetA = OffsetA.sextOrTrunc(NewPtrBitWidth);
  OffsetB = OffsetB.sextOrTrunc(NewPtrBitWidth);
  PtrDelta = PtrDelta.sextOrTrunc(NewPtrBitWidth);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == PtrDelta;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the pointer delta requested.
  APInt BaseDelta = PtrDelta - OffsetDelta;

  // Compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *C = SE.getConstant(BaseDelta);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
  if (X == PtrSCEVB)
    return true;

  // The above check will not catch the cases where one of the pointers is
  // factorized but the other one is not, such as (C + (S * (A + B))) vs
  // (AS + BS). Compute the difference with getMinusSCEV; that allows
  // re-combining the expressions and getting the simplified difference.
  const SCEV *Dist = SE.getMinusSCEV(PtrSCEVB, PtrSCEVA);
  if (C == Dist)
    return true;

  // Sometimes even this doesn't work, because SCEV can't always see through
  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
  // things the hard way.
  return lookThroughComplexAddresses(PtrA, PtrB, BaseDelta, Depth);
}

bool Vectorizer::lookThroughComplexAddresses(Value *PtrA, Value *PtrB,
                                             APInt PtrDelta,
                                             unsigned Depth) const {
  auto *GEPA = dyn_cast<GetElementPtrInst>(PtrA);
  auto *GEPB = dyn_cast<GetElementPtrInst>(PtrB);
  if (!GEPA || !GEPB)
    return lookThroughSelects(PtrA, PtrB, PtrDelta, Depth);

  // Look through GEPs after checking they're the same except for the last
  // index.
  if (GEPA->getNumOperands() != GEPB->getNumOperands() ||
      GEPA->getPointerOperand() != GEPB->getPointerOperand())
    return false;
  gep_type_iterator GTIA = gep_type_begin(GEPA);
  gep_type_iterator GTIB = gep_type_begin(GEPB);
  for (unsigned I = 0, E = GEPA->getNumIndices() - 1; I < E; ++I) {
    if (GTIA.getOperand() != GTIB.getOperand())
      return false;
    ++GTIA;
    ++GTIB;
  }

  Instruction *OpA = dyn_cast<Instruction>(GTIA.getOperand());
  Instruction *OpB = dyn_cast<Instruction>(GTIB.getOperand());
  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
      OpA->getType() != OpB->getType())
    return false;

  if (PtrDelta.isNegative()) {
    if (PtrDelta.isMinSignedValue())
      return false;
    PtrDelta.negate();
    std::swap(OpA, OpB);
  }
  uint64_t Stride = DL.getTypeAllocSize(GTIA.getIndexedType());
  if (PtrDelta.urem(Stride) != 0)
    return false;
  unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits();
  APInt IdxDiff = PtrDelta.udiv(Stride).zextOrSelf(IdxBitWidth);

  // Only look through a ZExt/SExt.
  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
    return false;

  bool Signed = isa<SExtInst>(OpA);

  // At this point A could be a function parameter, i.e. not an instruction.
  Value *ValA = OpA->getOperand(0);
  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
  if (!OpB || ValA->getType() != OpB->getType())
    return false;

  // Now we need to prove that adding IdxDiff to ValA won't overflow.
  bool Safe = false;
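  // CheckFlags(I, Signed) is true when the add I carries the wrap flag that
  // matters for the extension being looked through: no-signed-wrap when
  // looking through a sext, no-unsigned-wrap when looking through a zext.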
  auto CheckFlags = [](Instruction *I, bool Signed) {
    BinaryOperator *BinOpI = cast<BinaryOperator>(I);
    return (Signed && BinOpI->hasNoSignedWrap()) ||
           (!Signed && BinOpI->hasNoUnsignedWrap());
  };

  // First attempt: if OpB is an add with NSW/NUW, and OpB is IdxDiff added to
  // ValA, we're okay.
  if (OpB->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(OpB->getOperand(1)) &&
      IdxDiff.sle(cast<ConstantInt>(OpB->getOperand(1))->getSExtValue()) &&
      CheckFlags(OpB, Signed))
    Safe = true;

  // Second attempt: if both OpA and OpB are adds with NSW/NUW and with the
  // same LHS operand, we can guarantee that the transformation is safe if we
  // can prove that OpA won't overflow when IdxDiff is added to the RHS of OpA.
  // For example:
  //  %tmp7 = add nsw i32 %tmp2, %v0
  //  %tmp8 = sext i32 %tmp7 to i64
  //  ...
  //  %tmp11 = add nsw i32 %v0, 1
  //  %tmp12 = add nsw i32 %tmp2, %tmp11
  //  %tmp13 = sext i32 %tmp12 to i64
  //
  // Both %tmp7 and %tmp12 have the nsw flag and share the first operand
  // %tmp2. It's guaranteed that adding 1 to %tmp7 won't overflow because
  // %tmp11 adds 1 to %v0 and both %tmp11 and %tmp12 have the nsw flag.
  OpA = dyn_cast<Instruction>(ValA);
  if (!Safe && OpA && OpA->getOpcode() == Instruction::Add &&
      OpB->getOpcode() == Instruction::Add &&
      OpA->getOperand(0) == OpB->getOperand(0) && CheckFlags(OpA, Signed) &&
      CheckFlags(OpB, Signed)) {
    Value *RHSA = OpA->getOperand(1);
    Value *RHSB = OpB->getOperand(1);
    Instruction *OpRHSA = dyn_cast<Instruction>(RHSA);
    Instruction *OpRHSB = dyn_cast<Instruction>(RHSB);
    // Match `x +nsw/nuw y` and `x +nsw/nuw (y +nsw/nuw IdxDiff)`.
    if (OpRHSB && OpRHSB->getOpcode() == Instruction::Add &&
        CheckFlags(OpRHSB, Signed) && isa<ConstantInt>(OpRHSB->getOperand(1))) {
      int64_t CstVal = cast<ConstantInt>(OpRHSB->getOperand(1))->getSExtValue();
      if (OpRHSB->getOperand(0) == RHSA && IdxDiff.getSExtValue() == CstVal)
        Safe = true;
    }
    // Match `x +nsw/nuw (y +nsw/nuw -IdxDiff)` and `x +nsw/nuw y`.
    if (OpRHSA && OpRHSA->getOpcode() == Instruction::Add &&
        CheckFlags(OpRHSA, Signed) && isa<ConstantInt>(OpRHSA->getOperand(1))) {
      int64_t CstVal = cast<ConstantInt>(OpRHSA->getOperand(1))->getSExtValue();
      if (OpRHSA->getOperand(0) == RHSB && IdxDiff.getSExtValue() == -CstVal)
        Safe = true;
    }
    // Match `x +nsw/nuw (y +nsw/nuw c)` and
    // `x +nsw/nuw (y +nsw/nuw (c + IdxDiff))`.
    if (OpRHSA && OpRHSB && OpRHSA->getOpcode() == Instruction::Add &&
        OpRHSB->getOpcode() == Instruction::Add && CheckFlags(OpRHSA, Signed) &&
        CheckFlags(OpRHSB, Signed) && isa<ConstantInt>(OpRHSA->getOperand(1)) &&
        isa<ConstantInt>(OpRHSB->getOperand(1))) {
      int64_t CstValA =
          cast<ConstantInt>(OpRHSA->getOperand(1))->getSExtValue();
      int64_t CstValB =
          cast<ConstantInt>(OpRHSB->getOperand(1))->getSExtValue();
      if (OpRHSA->getOperand(0) == OpRHSB->getOperand(0) &&
          IdxDiff.getSExtValue() == (CstValB - CstValA))
        Safe = true;
    }
  }

  unsigned BitWidth = ValA->getType()->getScalarSizeInBits();

  // Third attempt:
  // If all set bits of IdxDiff or any higher order bit other than the sign bit
  // are known to be zero in ValA, we can add IdxDiff to it while guaranteeing
  // no overflow of any sort.
  if (!Safe) {
    OpA = dyn_cast<Instruction>(ValA);
    if (!OpA)
      return false;
    KnownBits Known(BitWidth);
    computeKnownBits(OpA, Known, DL, 0, nullptr, OpA, &DT);
    APInt BitsAllowedToBeSet = Known.Zero.zext(IdxDiff.getBitWidth());
    if (Signed)
      BitsAllowedToBeSet.clearBit(BitWidth - 1);
    if (BitsAllowedToBeSet.ult(IdxDiff))
      return false;
  }

  const SCEV *OffsetSCEVA = SE.getSCEV(ValA);
  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
  const SCEV *C = SE.getConstant(IdxDiff.trunc(BitWidth));
  const SCEV *X = SE.getAddExpr(OffsetSCEVA, C);
  return X == OffsetSCEVB;
}

bool Vectorizer::lookThroughSelects(Value *PtrA, Value *PtrB,
                                    const APInt &PtrDelta,
                                    unsigned Depth) const {
  if (Depth++ == MaxDepth)
    return false;

  if (auto *SelectA = dyn_cast<SelectInst>(PtrA)) {
    if (auto *SelectB = dyn_cast<SelectInst>(PtrB)) {
      return SelectA->getCondition() == SelectB->getCondition() &&
             areConsecutivePointers(SelectA->getTrueValue(),
                                    SelectB->getTrueValue(), PtrDelta, Depth) &&
             areConsecutivePointers(SelectA->getFalseValue(),
                                    SelectB->getFalseValue(), PtrDelta, Depth);
    }
  }
  return false;
}

void Vectorizer::reorder(Instruction *I) {
  SmallPtrSet<Instruction *, 16> InstructionsToMove;
  SmallVector<Instruction *, 16> Worklist;

  Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *IW = Worklist.pop_back_val();
    int NumOperands = IW->getNumOperands();
    for (int i = 0; i < NumOperands; i++) {
      Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i));
      if (!IM || IM->getOpcode() == Instruction::PHI)
        continue;

      // If IM is in another BB, no need to move it, because this pass only
      // vectorizes instructions within one BB.
      if (IM->getParent() != I->getParent())
        continue;

      if (!IM->comesBefore(I)) {
        InstructionsToMove.insert(IM);
        Worklist.push_back(IM);
      }
    }
  }

  // All instructions to move should follow I. Start from I, not from begin().
  for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E;
       ++BBI) {
    if (!InstructionsToMove.count(&*BBI))
      continue;
    Instruction *IM = &*BBI;
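    // Step the iterator back before unlinking IM so it stays valid; the ++BBI
    // in the loop header then lands on the instruction that followed IM.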
    --BBI;
    IM->removeFromParent();
    IM->insertBefore(I);
  }
}

std::pair<BasicBlock::iterator, BasicBlock::iterator>
Vectorizer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
  Instruction *C0 = Chain[0];
  BasicBlock::iterator FirstInstr = C0->getIterator();
  BasicBlock::iterator LastInstr = C0->getIterator();

  BasicBlock *BB = C0->getParent();
  unsigned NumFound = 0;
  for (Instruction &I : *BB) {
    if (!is_contained(Chain, &I))
      continue;

    ++NumFound;
    if (NumFound == 1) {
      FirstInstr = I.getIterator();
    }
    if (NumFound == Chain.size()) {
      LastInstr = I.getIterator();
      break;
    }
  }

  // Range is [first, last).
  return std::make_pair(FirstInstr, ++LastInstr);
}

void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
  SmallVector<Instruction *, 16> Instrs;
  for (Instruction *I : Chain) {
    Value *PtrOperand = getLoadStorePointerOperand(I);
    assert(PtrOperand && "Instruction must have a pointer operand.");
    Instrs.push_back(I);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
      Instrs.push_back(GEP);
  }

  // Erase instructions.
  for (Instruction *I : Instrs)
    if (I->use_empty())
      I->eraseFromParent();
}

std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
Vectorizer::splitOddVectorElts(ArrayRef<Instruction *> Chain,
                               unsigned ElementSizeBits) {
  unsigned ElementSizeBytes = ElementSizeBits / 8;
  unsigned SizeBytes = ElementSizeBytes * Chain.size();
  unsigned NumLeft = (SizeBytes - (SizeBytes % 4)) / ElementSizeBytes;
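  // NumLeft is the number of whole elements that fit into the largest multiple
  // of 4 bytes not exceeding the chain's total size.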
  if (NumLeft == Chain.size()) {
    if ((NumLeft & 1) == 0)
      NumLeft /= 2; // Split even in half
    else
      --NumLeft; // Split off last element
  } else if (NumLeft == 0)
    NumLeft = 1;
  return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
}

ArrayRef<Instruction *>
Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
  // These are in BB order, unlike Chain, which is in address order.
  SmallVector<Instruction *, 16> MemoryInstrs;
  SmallVector<Instruction *, 16> ChainInstrs;

  bool IsLoadChain = isa<LoadInst>(Chain[0]);
  LLVM_DEBUG({
    for (Instruction *I : Chain) {
      if (IsLoadChain)
        assert(isa<LoadInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
      else
        assert(isa<StoreInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
    }
  });

  for (Instruction &I : make_range(getBoundaryInstrs(Chain))) {
    if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
      if (!is_contained(Chain, &I))
        MemoryInstrs.push_back(&I);
      else
        ChainInstrs.push_back(&I);
    } else if (isa<IntrinsicInst>(&I) &&
               cast<IntrinsicInst>(&I)->getIntrinsicID() ==
                   Intrinsic::sideeffect) {
      // Ignore llvm.sideeffect calls.
    } else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) {
      LLVM_DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I
                        << '\n');
      break;
    } else if (!IsLoadChain && (I.mayReadOrWriteMemory() || I.mayThrow())) {
      LLVM_DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I
                        << '\n');
      break;
    }
  }

  // Loop until we find an instruction in ChainInstrs that we can't vectorize.
  unsigned ChainInstrIdx = 0;
  Instruction *BarrierMemoryInstr = nullptr;

  for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) {
    Instruction *ChainInstr = ChainInstrs[ChainInstrIdx];

    // If a barrier memory instruction was found, chain instructions that
    // follow will not be added to the valid prefix.
    if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(ChainInstr))
      break;

    // Check (in BB order) if any instruction prevents ChainInstr from being
    // vectorized. Find and store the first such "conflicting" instruction.
    for (Instruction *MemInstr : MemoryInstrs) {
      // If a barrier memory instruction was found, do not check past it.
      if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(MemInstr))
        break;

      auto *MemLoad = dyn_cast<LoadInst>(MemInstr);
      auto *ChainLoad = dyn_cast<LoadInst>(ChainInstr);
      if (MemLoad && ChainLoad)
        continue;

      // We can ignore the alias if we have a load/store pair and the load is
      // known to be invariant. The load cannot be clobbered by the store.
      auto IsInvariantLoad = [](const LoadInst *LI) -> bool {
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      };

      // We can ignore the alias as long as the load comes before the store,
      // because that means we won't be moving the load past the store to
      // vectorize it (the vectorized load is inserted at the location of the
      // first load in the chain).
      if (isa<StoreInst>(MemInstr) && ChainLoad &&
          (IsInvariantLoad(ChainLoad) || ChainLoad->comesBefore(MemInstr)))
        continue;

      // Same case, but in reverse.
      if (MemLoad && isa<StoreInst>(ChainInstr) &&
          (IsInvariantLoad(MemLoad) || MemLoad->comesBefore(ChainInstr)))
        continue;

      if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
                        MemoryLocation::get(ChainInstr))) {
        LLVM_DEBUG({
          dbgs() << "LSV: Found alias:\n"
                    " Aliasing instruction and pointer:\n"
                 << " " << *MemInstr << '\n'
                 << " " << *getLoadStorePointerOperand(MemInstr) << '\n'
                 << " Aliased instruction and pointer:\n"
                 << " " << *ChainInstr << '\n'
                 << " " << *getLoadStorePointerOperand(ChainInstr) << '\n';
        });
        // Save this aliasing memory instruction as a barrier, but allow other
        // instructions that precede the barrier to be vectorized with this
        // one.
        BarrierMemoryInstr = MemInstr;
        break;
      }
    }
    // Continue the search only for store chains, since vectorizing stores that
    // precede an aliasing load is valid. Conversely, vectorizing loads is
    // valid up to an aliasing store, but should not pull loads from further
    // down in the basic block.
    if (IsLoadChain && BarrierMemoryInstr) {
      // The BarrierMemoryInstr is a store that precedes ChainInstr.
      assert(BarrierMemoryInstr->comesBefore(ChainInstr));
      break;
    }
  }

  // Find the largest prefix of Chain whose elements are all in
  // ChainInstrs[0, ChainInstrIdx). This is the largest vectorizable prefix of
  // Chain. (Recall that Chain is in address order, but ChainInstrs is in BB
  // order.)
  SmallPtrSet<Instruction *, 8> VectorizableChainInstrs(
      ChainInstrs.begin(), ChainInstrs.begin() + ChainInstrIdx);
  unsigned ChainIdx = 0;
  for (unsigned ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
    if (!VectorizableChainInstrs.count(Chain[ChainIdx]))
      break;
  }
  return Chain.slice(0, ChainIdx);
}

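/// Returns the ChainID used to bucket \p Ptr's access for the quadratic
/// search: the pointer's underlying object, except that accesses through a
/// select are bucketed by the select's condition (see the comment in the
/// body below).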
static ChainID getChainID(const Value *Ptr, const DataLayout &DL) {
  const Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
  if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
    // The selects themselves are distinct instructions even if they share the
    // same condition and evaluate to consecutive pointers for the true and
    // false values of the condition. Therefore using the selects themselves
    // for grouping instructions would put consecutive accesses into different
    // lists; they would never even be checked for being consecutive, and so
    // would not be vectorized.
    return Sel->getCondition();
  }
  return ObjPtr;
}

std::pair<InstrListMap, InstrListMap>
Vectorizer::collectInstructions(BasicBlock *BB) {
  InstrListMap LoadRefs;
  InstrListMap StoreRefs;

  for (Instruction &I : *BB) {
    if (!I.mayReadOrWriteMemory())
      continue;

    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      if (!LI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeLoad(LI))
        continue;

      Type *Ty = LI->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and do not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>).
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      Value *Ptr = LI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      unsigned VF = VecRegSize / TySize;
      VectorType *VecTy = dyn_cast<VectorType>(Ty);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2 ||
          (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
        continue;

      // Make sure all the users of a vector are constant-index extracts.
      if (isa<VectorType>(Ty) && !llvm::all_of(LI->users(), [](const User *U) {
            const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
            return EEI && isa<ConstantInt>(EEI->getOperand(1));
          }))
        continue;

      // Save the load locations.
      const ChainID ID = getChainID(Ptr, DL);
      LoadRefs[ID].push_back(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeStore(SI))
        continue;

      Type *Ty = SI->getValueOperand()->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and do not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>).
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      Value *Ptr = SI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      unsigned VF = VecRegSize / TySize;
      VectorType *VecTy = dyn_cast<VectorType>(Ty);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2 ||
          (VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
        continue;

      if (isa<VectorType>(Ty) && !llvm::all_of(SI->users(), [](const User *U) {
            const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
            return EEI && isa<ConstantInt>(EEI->getOperand(1));
          }))
        continue;

      // Save store location.
      const ChainID ID = getChainID(Ptr, DL);
      StoreRefs[ID].push_back(SI);
    }
  }

  return {LoadRefs, StoreRefs};
}

bool Vectorizer::vectorizeChains(InstrListMap &Map) {
  bool Changed = false;

  for (const std::pair<ChainID, InstrList> &Chain : Map) {
    unsigned Size = Chain.second.size();
    if (Size < 2)
      continue;

    LLVM_DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");

    // Process the instructions in chunks of 64.
    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
      unsigned Len = std::min<unsigned>(CE - CI, 64);
      ArrayRef<Instruction *> Chunk(&Chain.second[CI], Len);
      Changed |= vectorizeInstructions(Chunk);
    }
  }

  return Changed;
}

bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) {
  LLVM_DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size()
                    << " instructions.\n");
  SmallVector<int, 16> Heads, Tails;
  int ConsecutiveChain[64];
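  // ConsecutiveChain[i] == j means that Instrs[j] accesses the memory location
  // immediately following the one accessed by Instrs[i] (or -1 if no such
  // instruction was found). Heads and Tails record the same pairs:
  // Instrs[Tails[k]] directly follows Instrs[Heads[k]] in memory.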

  // Do a quadratic search on all of the given loads/stores and find all of the
  // pairs of loads/stores that follow each other.
  for (int i = 0, e = Instrs.size(); i < e; ++i) {
    ConsecutiveChain[i] = -1;
    for (int j = e - 1; j >= 0; --j) {
      if (i == j)
        continue;

      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
        if (ConsecutiveChain[i] != -1) {
          int CurDistance = std::abs(ConsecutiveChain[i] - i);
          int NewDistance = std::abs(ConsecutiveChain[i] - j);
          if (j < i || NewDistance > CurDistance)
            continue; // Should not insert.
        }

        Tails.push_back(j);
        Heads.push_back(i);
        ConsecutiveChain[i] = j;
      }
    }
  }

  bool Changed = false;
  SmallPtrSet<Instruction *, 16> InstructionsProcessed;

  for (int Head : Heads) {
    if (InstructionsProcessed.count(Instrs[Head]))
      continue;
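    // If this head is also the tail of some other not-yet-processed head, it
    // lies in the middle of a longer chain; skip it and let that chain's true
    // head start the walk instead.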
    bool LongerChainExists = false;
    for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
      if (Head == Tails[TIt] &&
          !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
        LongerChainExists = true;
        break;
      }
    if (LongerChainExists)
      continue;

    // We found an instr that starts a chain. Now follow the chain and try to
    // vectorize it.
    SmallVector<Instruction *, 16> Operands;
    int I = Head;
    while (I != -1 && (is_contained(Tails, I) || is_contained(Heads, I))) {
      if (InstructionsProcessed.count(Instrs[I]))
        break;

      Operands.push_back(Instrs[I]);
      I = ConsecutiveChain[I];
    }

    bool Vectorized = false;
    if (isa<LoadInst>(*Operands.begin()))
      Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
    else
      Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);

    Changed |= Vectorized;
  }

  return Changed;
}

bool Vectorizer::vectorizeStoreChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  StoreInst *S0 = cast<StoreInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole store.
  Type *StoreTy = nullptr;
  for (Instruction *I : Chain) {
    StoreTy = cast<StoreInst>(I)->getValueOperand()->getType();
    if (StoreTy->isIntOrIntVectorTy())
      break;

    if (StoreTy->isPtrOrPtrVectorTy()) {
      StoreTy = Type::getIntNTy(F.getParent()->getContext(),
                                DL.getTypeSizeInBits(StoreTy));
      break;
    }
  }
  assert(StoreTy && "Failed to find store type");

  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
  unsigned AS = S0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  Align Alignment = S0->getAlign();

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller
    // chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;

  VectorType *VecTy;
  VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy);
  if (VecStoreTy)
    VecTy = FixedVectorType::get(StoreTy->getScalarType(),
                                 Chain.size() * VecStoreTy->getNumElements());
  else
    VecTy = FixedVectorType::get(StoreTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
                         " Creating two separate arrays.\n");
    return vectorizeStoreChain(Chain.slice(0, TargetVF),
                               InstructionsProcessed) |
           vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed);
  }

  LLVM_DEBUG({
    dbgs() << "LSV: Stores to vectorize:\n";
    for (Instruction *I : Chain)
      dbgs() << " " << *I << "\n";
  });

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the store is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
    if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
      auto Chains = splitOddVectorElts(Chain, Sz);
      return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
             vectorizeStoreChain(Chains.second, InstructionsProcessed);
    }

    Align NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
                                                Align(StackAdjustedAlignment),
                                                DL, S0, nullptr, &DT);
    if (NewAlign >= Alignment)
      Alignment = NewAlign;
    else
      return false;
  }

  if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
           vectorizeStoreChain(Chains.second, InstructionsProcessed);
  }

  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*Last);
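  // The combined store is emitted at the position of the last store in the
  // chain; every value being stored is defined before its original store, so
  // all of them are available at this point.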

  Value *Vec = UndefValue::get(VecTy);

  if (VecStoreTy) {
    unsigned VecWidth = VecStoreTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
        unsigned NewIdx = J + I * VecWidth;
        Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
                                                      Builder.getInt32(J));
        if (Extract->getType() != StoreTy->getScalarType())
          Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());

        Value *Insert =
            Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx));
        Vec = Insert;
      }
    }
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      Value *Extract = Store->getValueOperand();
      if (Extract->getType() != StoreTy->getScalarType())
        Extract =
            Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType());

      Value *Insert =
          Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I));
      Vec = Insert;
    }
  }

  StoreInst *SI = Builder.CreateAlignedStore(
      Vec,
      Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS)),
      Alignment);
  propagateMetadata(SI, Chain);

  eraseInstructions(Chain);
  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::vectorizeLoadChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  LoadInst *L0 = cast<LoadInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole load.
  Type *LoadTy = nullptr;
  for (const auto &V : Chain) {
    LoadTy = cast<LoadInst>(V)->getType();
    if (LoadTy->isIntOrIntVectorTy())
      break;

    if (LoadTy->isPtrOrPtrVectorTy()) {
      LoadTy = Type::getIntNTy(F.getParent()->getContext(),
                               DL.getTypeSizeInBits(LoadTy));
      break;
    }
  }
  assert(LoadTy && "Can't determine LoadInst type from chain");

  unsigned Sz = DL.getTypeSizeInBits(LoadTy);
  unsigned AS = L0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  Align Alignment = L0->getAlign();

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller
    // chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;
  VectorType *VecTy;
  VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy);
  if (VecLoadTy)
    VecTy = FixedVectorType::get(LoadTy->getScalarType(),
                                 Chain.size() * VecLoadTy->getNumElements());
  else
    VecTy = FixedVectorType::get(LoadTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
                         " Creating two separate arrays.\n");
    return vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed) |
           vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed);
  }

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the load is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
    if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
      auto Chains = splitOddVectorElts(Chain, Sz);
      return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
             vectorizeLoadChain(Chains.second, InstructionsProcessed);
    }

    Align NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(),
                                                Align(StackAdjustedAlignment),
                                                DL, L0, nullptr, &DT);
    if (NewAlign >= Alignment)
      Alignment = NewAlign;
    else
      return false;
  }

  if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
           vectorizeLoadChain(Chains.second, InstructionsProcessed);
  }

  LLVM_DEBUG({
    dbgs() << "LSV: Loads to vectorize:\n";
    for (Instruction *I : Chain)
      I->dump();
  });

  // getVectorizablePrefix already computed getBoundaryInstrs. The value of
  // Last may have changed since then, but the value of First won't have. If it
  // matters, we could compute getBoundaryInstrs only once and reuse it here.
  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*First);
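  // The combined load is emitted at the position of the first load in the
  // chain; the users of the original loads are rewritten below to extract
  // their elements from it.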

  Value *Bitcast =
      Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
  LoadInst *LI =
      Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment));
  propagateMetadata(LI, Chain);

  if (VecLoadTy) {
    SmallVector<Instruction *, 16> InstrsToErase;

    unsigned VecWidth = VecLoadTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      for (auto Use : Chain[I]->users()) {
        // All users of vector loads are ExtractElement instructions with
        // constant indices, otherwise we would have bailed before now.
        Instruction *UI = cast<Instruction>(Use);
        unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
        unsigned NewIdx = Idx + I * VecWidth;
        Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx),
                                                UI->getName());
        if (V->getType() != UI->getType())
          V = Builder.CreateBitCast(V, UI->getType());

        // Replace the old instruction.
        UI->replaceAllUsesWith(V);
        InstrsToErase.push_back(UI);
      }
    }

    // Bitcast might not be an Instruction, if the value being loaded is a
    // constant. In that case, no need to reorder anything.
    if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
      reorder(BitcastInst);

    for (auto I : InstrsToErase)
      I->eraseFromParent();
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      Value *CV = Chain[I];
      Value *V =
          Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
      if (V->getType() != CV->getType()) {
        V = Builder.CreateBitOrPointerCast(V, CV->getType());
      }

      // Replace the old instruction.
      CV->replaceAllUsesWith(V);
    }

    if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
      reorder(BitcastInst);
  }

  eraseInstructions(Chain);

  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                                    unsigned Alignment) {
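  // An access whose alignment is a multiple of its size can never be
  // misaligned.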
  if (Alignment % SzInBytes == 0)
    return false;

  bool Fast = false;
  bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
                                                   SzInBytes * 8, AddressSpace,
                                                   Alignment, &Fast);
  LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
                    << " and fast? " << Fast << "\n";);
  return !Allows || !Fast;
}