//===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges loads/stores to/from sequential memory addresses into vector
// loads/stores. Although there's nothing GPU-specific in here, this pass is
// motivated by the microarchitectural quirks of NVIDIA and AMD GPUs.
//
// (For simplicity below we talk about loads only, but everything also applies
// to stores.)
//
// This pass is intended to be run late in the pipeline, after other
// vectorization opportunities have been exploited. So the assumption here is
// that immediately following our new vector load we'll need to extract out the
// individual elements of the load, so we can operate on them individually.
//
// On CPUs this transformation is usually not beneficial, because extracting the
// elements of a vector register is expensive on most architectures. It's
// usually better just to load each element individually into its own scalar
// register.
//
// However, NVIDIA and AMD GPUs don't have proper vector registers. Instead, a
// "vector load" loads directly into a series of scalar registers. In effect,
// extracting the elements of the vector is free. It's therefore always
// beneficial to vectorize a sequence of loads on these architectures.
//
// Vectorizing (perhaps a better name might be "coalescing") loads can have
// large performance impacts on GPU kernels, and opportunities for vectorizing
// are common in GPU code. This pass tries very hard to find such
// opportunities; its runtime is quadratic in the number of loads in a BB.
//
// Some CPU architectures, such as ARM, have instructions that load into
// multiple scalar registers, similar to a GPU vectorized load. In theory ARM
// could use this pass (with some modifications), but currently it implements
// its own pass to do something similar to what we do here.
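//
// As a rough, illustrative example (the value and pointer names below are
// made up for this comment, not taken from any particular test), four loads
// of i32 from consecutive addresses:
//
//   %x0 = load i32, i32* %p0, align 16
//   %x1 = load i32, i32* %p1, align 4    ; %p1 = %p0 plus 4 bytes
//   %x2 = load i32, i32* %p2, align 8    ; %p2 = %p0 plus 8 bytes
//   %x3 = load i32, i32* %p3, align 4    ; %p3 = %p0 plus 12 bytes
//
// are rewritten into a single vector load plus constant-index extracts:
//
//   %vp  = bitcast i32* %p0 to <4 x i32>*
//   %vec = load <4 x i32>, <4 x i32>* %vp, align 16
//   %x0  = extractelement <4 x i32> %vec, i32 0
//   %x1  = extractelement <4 x i32> %vec, i32 1
//   %x2  = extractelement <4 x i32> %vec, i32 2
//   %x3  = extractelement <4 x i32> %vec, i32 3
//
//===----------------------------------------------------------------------===//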

#include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "load-store-vectorizer"

STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");

// FIXME: Assuming stack alignment of 4 is always good enough
static const unsigned StackAdjustedAlignment = 4;

namespace {

/// ChainID is an arbitrary token that is allowed to be different only for the
/// accesses that are guaranteed to be considered non-consecutive by
/// Vectorizer::isConsecutiveAccess. It's used for grouping instructions
/// together and reducing the number of instructions the main search operates on
/// at a time, i.e. this is to reduce compile time and nothing else as the main
/// search has O(n^2) time complexity. The underlying type of ChainID should not
/// be relied upon.
using ChainID = const Value *;
using InstrList = SmallVector<Instruction *, 8>;
using InstrListMap = MapVector<ChainID, InstrList>;

class Vectorizer {
  Function &F;
  AliasAnalysis &AA;
  DominatorTree &DT;
  ScalarEvolution &SE;
  TargetTransformInfo &TTI;
  const DataLayout &DL;
  IRBuilder<> Builder;

public:
  Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
             ScalarEvolution &SE, TargetTransformInfo &TTI)
      : F(F), AA(AA), DT(DT), SE(SE), TTI(TTI),
        DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {}

  bool run();

private:
  unsigned getPointerAddressSpace(Value *I);

  static const unsigned MaxDepth = 3;

  bool isConsecutiveAccess(Value *A, Value *B);
  bool areConsecutivePointers(Value *PtrA, Value *PtrB, APInt PtrDelta,
                              unsigned Depth = 0) const;
  bool lookThroughComplexAddresses(Value *PtrA, Value *PtrB, APInt PtrDelta,
                                   unsigned Depth) const;
  bool lookThroughSelects(Value *PtrA, Value *PtrB, const APInt &PtrDelta,
                          unsigned Depth) const;

  /// After vectorization, reorder the instructions that I depends on
  /// (the instructions defining its operands), to ensure they dominate I.
  void reorder(Instruction *I);

  /// Returns the first and the last instructions in Chain.
  std::pair<BasicBlock::iterator, BasicBlock::iterator>
  getBoundaryInstrs(ArrayRef<Instruction *> Chain);

  /// Erases the original instructions after vectorizing.
  void eraseInstructions(ArrayRef<Instruction *> Chain);

  /// "Legalize" the vector type that would be produced by combining \p
  /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
  /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
  /// expected to have more than 4 elements.
  std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
  splitOddVectorElts(ArrayRef<Instruction *> Chain, unsigned ElementSizeBits);

  /// Finds the largest prefix of Chain that's vectorizable, checking for
  /// intervening instructions which may affect the memory accessed by the
  /// instructions within Chain.
  ///
  /// The elements of \p Chain must be all loads or all stores and must be in
  /// address order.
  ArrayRef<Instruction *> getVectorizablePrefix(ArrayRef<Instruction *> Chain);

  /// Collects load and store instructions to vectorize.
  std::pair<InstrListMap, InstrListMap> collectInstructions(BasicBlock *BB);

  /// Processes the collected instructions in \p Map. The values of \p Map
  /// should be all loads or all stores.
  bool vectorizeChains(InstrListMap &Map);

  /// Finds the loads/stores to consecutive memory addresses and vectorizes
  /// them.
  bool vectorizeInstructions(ArrayRef<Instruction *> Instrs);

  /// Vectorizes the load instructions in Chain.
  bool
  vectorizeLoadChain(ArrayRef<Instruction *> Chain,
                     SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Vectorizes the store instructions in Chain.
  bool
  vectorizeStoreChain(ArrayRef<Instruction *> Chain,
                      SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Checks whether this load/store access is misaligned.
  bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                          unsigned Alignment);
};

class LoadStoreVectorizerLegacyPass : public FunctionPass {
public:
  static char ID;

  LoadStoreVectorizerLegacyPass() : FunctionPass(ID) {
    initializeLoadStoreVectorizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "GPU Load and Store Vectorizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

char LoadStoreVectorizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                      "Vectorize load and store instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                    "Vectorize load and store instructions", false, false)

Pass *llvm::createLoadStoreVectorizerPass() {
  return new LoadStoreVectorizerLegacyPass();
}

bool LoadStoreVectorizerLegacyPass::runOnFunction(Function &F) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  Vectorizer V(F, AA, DT, SE, TTI);
  return V.run();
}

PreservedAnalyses LoadStoreVectorizerPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return PreservedAnalyses::all();

  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);

  Vectorizer V(F, AA, DT, SE, TTI);
  bool Changed = V.run();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return Changed ? PA : PreservedAnalyses::all();
}

// The real propagateMetadata expects a SmallVector<Value*>, but we deal in
// vectors of Instructions.
static void propagateMetadata(Instruction *I, ArrayRef<Instruction *> IL) {
  SmallVector<Value *, 8> VL(IL.begin(), IL.end());
  propagateMetadata(I, VL);
}

// Vectorizer Implementation
bool Vectorizer::run() {
  bool Changed = false;

  // Scan the blocks in the function in post order.
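  // (Each basic block is processed independently and the pass never combines
  // accesses across blocks, so the traversal order should not affect which
  // chains are found; post order is simply a convenient way to visit every
  // block once.)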
  for (BasicBlock *BB : post_order(&F)) {
    InstrListMap LoadRefs, StoreRefs;
    std::tie(LoadRefs, StoreRefs) = collectInstructions(BB);
    Changed |= vectorizeChains(LoadRefs);
    Changed |= vectorizeChains(StoreRefs);
  }

  return Changed;
}

unsigned Vectorizer::getPointerAddressSpace(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

// FIXME: Merge with llvm::isConsecutiveAccess
bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  unsigned ASA = getPointerAddressSpace(A);
  unsigned ASB = getPointerAddressSpace(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same size type.
  Type *PtrATy = PtrA->getType()->getPointerElementType();
  Type *PtrBTy = PtrB->getType()->getPointerElementType();
  if (PtrA == PtrB ||
      PtrATy->isVectorTy() != PtrBTy->isVectorTy() ||
      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
          DL.getTypeStoreSize(PtrBTy->getScalarType()))
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));

  return areConsecutivePointers(PtrA, PtrB, Size);
}

bool Vectorizer::areConsecutivePointers(Value *PtrA, Value *PtrB,
                                        APInt PtrDelta, unsigned Depth) const {
  unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(PtrA->getType());
  APInt OffsetA(PtrBitWidth, 0);
  APInt OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  unsigned NewPtrBitWidth = DL.getTypeStoreSizeInBits(PtrA->getType());

  if (NewPtrBitWidth != DL.getTypeStoreSizeInBits(PtrB->getType()))
    return false;

  // In case we have to shrink the pointer,
  // stripAndAccumulateInBoundsConstantOffsets should properly handle a
  // possible overflow and the value should fit into the smallest data type
  // used in the cast/gep chain.
  assert(OffsetA.getMinSignedBits() <= NewPtrBitWidth &&
         OffsetB.getMinSignedBits() <= NewPtrBitWidth);

  OffsetA = OffsetA.sextOrTrunc(NewPtrBitWidth);
  OffsetB = OffsetB.sextOrTrunc(NewPtrBitWidth);
  PtrDelta = PtrDelta.sextOrTrunc(NewPtrBitWidth);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == PtrDelta;

  // Compute the base pointer delta needed to make the final delta equal to
  // the requested pointer delta.
  APInt BaseDelta = PtrDelta - OffsetDelta;

  // Compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *C = SE.getConstant(BaseDelta);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
  if (X == PtrSCEVB)
    return true;

  // The above check will not catch the cases where one of the pointers is
  // factorized but the other one is not, such as (C + (S * (A + B))) vs
  // (AS + BS).
  // Get the minus SCEV. That will allow re-combining the expressions and
  // getting the simplified difference.
  const SCEV *Dist = SE.getMinusSCEV(PtrSCEVB, PtrSCEVA);
  if (C == Dist)
    return true;

  // Sometimes even this doesn't work, because SCEV can't always see through
  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
  // things the hard way.
  return lookThroughComplexAddresses(PtrA, PtrB, BaseDelta, Depth);
}

bool Vectorizer::lookThroughComplexAddresses(Value *PtrA, Value *PtrB,
                                             APInt PtrDelta,
                                             unsigned Depth) const {
  auto *GEPA = dyn_cast<GetElementPtrInst>(PtrA);
  auto *GEPB = dyn_cast<GetElementPtrInst>(PtrB);
  if (!GEPA || !GEPB)
    return lookThroughSelects(PtrA, PtrB, PtrDelta, Depth);

  // Look through GEPs after checking they're the same except for the last
  // index.
  if (GEPA->getNumOperands() != GEPB->getNumOperands() ||
      GEPA->getPointerOperand() != GEPB->getPointerOperand())
    return false;
  gep_type_iterator GTIA = gep_type_begin(GEPA);
  gep_type_iterator GTIB = gep_type_begin(GEPB);
  for (unsigned I = 0, E = GEPA->getNumIndices() - 1; I < E; ++I) {
    if (GTIA.getOperand() != GTIB.getOperand())
      return false;
    ++GTIA;
    ++GTIB;
  }

  Instruction *OpA = dyn_cast<Instruction>(GTIA.getOperand());
  Instruction *OpB = dyn_cast<Instruction>(GTIB.getOperand());
  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
      OpA->getType() != OpB->getType())
    return false;

  if (PtrDelta.isNegative()) {
    if (PtrDelta.isMinSignedValue())
      return false;
    PtrDelta.negate();
    std::swap(OpA, OpB);
  }
  uint64_t Stride = DL.getTypeAllocSize(GTIA.getIndexedType());
  if (PtrDelta.urem(Stride) != 0)
    return false;
  unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits();
  APInt IdxDiff = PtrDelta.udiv(Stride).zextOrSelf(IdxBitWidth);

  // Only look through a ZExt/SExt.
  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
    return false;

  bool Signed = isa<SExtInst>(OpA);

  // At this point A could be a function parameter, i.e. not an instruction.
  Value *ValA = OpA->getOperand(0);
  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
  if (!OpB || ValA->getType() != OpB->getType())
    return false;

  // Now we need to prove that adding IdxDiff to ValA won't overflow.
  bool Safe = false;
  auto CheckFlags = [](Instruction *I, bool Signed) {
    BinaryOperator *BinOpI = cast<BinaryOperator>(I);
    return (Signed && BinOpI->hasNoSignedWrap()) ||
           (!Signed && BinOpI->hasNoUnsignedWrap());
  };

  // First attempt: if OpB is an add with NSW/NUW, and OpB is IdxDiff added to
  // ValA, we're okay.
  if (OpB->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(OpB->getOperand(1)) &&
      IdxDiff.sle(cast<ConstantInt>(OpB->getOperand(1))->getSExtValue()) &&
      CheckFlags(OpB, Signed))
    Safe = true;

  // Second attempt: if both OpA and OpB are adds with NSW/NUW and with the
  // same LHS operand, we can guarantee that the transformation is safe if we
  // can prove that OpA won't overflow when IdxDiff is added to the RHS of
  // OpA.
  // For example:
  //  %tmp7 = add nsw i32 %tmp2, %v0
  //  %tmp8 = sext i32 %tmp7 to i64
  //  ...
  //  %tmp11 = add nsw i32 %v0, 1
  //  %tmp12 = add nsw i32 %tmp2, %tmp11
  //  %tmp13 = sext i32 %tmp12 to i64
  //
  // Both %tmp7 and %tmp12 have the nsw flag and the first operand
  // is %tmp2.
  // It's guaranteed that adding 1 to %tmp7 won't overflow
  // because %tmp11 adds 1 to %v0 and both %tmp11 and %tmp12 have the
  // nsw flag.
  OpA = dyn_cast<Instruction>(ValA);
  if (!Safe && OpA && OpA->getOpcode() == Instruction::Add &&
      OpB->getOpcode() == Instruction::Add &&
      OpA->getOperand(0) == OpB->getOperand(0) && CheckFlags(OpA, Signed) &&
      CheckFlags(OpB, Signed)) {
    Value *RHSA = OpA->getOperand(1);
    Value *RHSB = OpB->getOperand(1);
    Instruction *OpRHSA = dyn_cast<Instruction>(RHSA);
    Instruction *OpRHSB = dyn_cast<Instruction>(RHSB);
    // Match `x +nsw/nuw y` and `x +nsw/nuw (y +nsw/nuw IdxDiff)`.
    if (OpRHSB && OpRHSB->getOpcode() == Instruction::Add &&
        CheckFlags(OpRHSB, Signed) && isa<ConstantInt>(OpRHSB->getOperand(1))) {
      int64_t CstVal = cast<ConstantInt>(OpRHSB->getOperand(1))->getSExtValue();
      if (OpRHSB->getOperand(0) == RHSA && IdxDiff.getSExtValue() == CstVal)
        Safe = true;
    }
    // Match `x +nsw/nuw (y +nsw/nuw -IdxDiff)` and `x +nsw/nuw y`.
    if (OpRHSA && OpRHSA->getOpcode() == Instruction::Add &&
        CheckFlags(OpRHSA, Signed) && isa<ConstantInt>(OpRHSA->getOperand(1))) {
      int64_t CstVal = cast<ConstantInt>(OpRHSA->getOperand(1))->getSExtValue();
      if (OpRHSA->getOperand(0) == RHSB && IdxDiff.getSExtValue() == -CstVal)
        Safe = true;
    }
    // Match `x +nsw/nuw (y +nsw/nuw c)` and
    // `x +nsw/nuw (y +nsw/nuw (c + IdxDiff))`.
    if (OpRHSA && OpRHSB && OpRHSA->getOpcode() == Instruction::Add &&
        OpRHSB->getOpcode() == Instruction::Add && CheckFlags(OpRHSA, Signed) &&
        CheckFlags(OpRHSB, Signed) && isa<ConstantInt>(OpRHSA->getOperand(1)) &&
        isa<ConstantInt>(OpRHSB->getOperand(1))) {
      int64_t CstValA =
          cast<ConstantInt>(OpRHSA->getOperand(1))->getSExtValue();
      int64_t CstValB =
          cast<ConstantInt>(OpRHSB->getOperand(1))->getSExtValue();
      if (OpRHSA->getOperand(0) == OpRHSB->getOperand(0) &&
          IdxDiff.getSExtValue() == (CstValB - CstValA))
        Safe = true;
    }
  }

  unsigned BitWidth = ValA->getType()->getScalarSizeInBits();

  // Third attempt:
  // If all set bits of IdxDiff, and any higher-order bits other than the sign
  // bit, are known to be zero in ValA, we can add IdxDiff to it while
  // guaranteeing no overflow of any sort.
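  // Informal justification for the check below: every bit that is known to be
  // zero in ValA is headroom an addition can consume without carrying past it.
  // Interpreting the masks as unsigned integers, ValA <= ~Known.Zero, so if
  // IdxDiff <= Known.Zero then ValA + IdxDiff <= all-ones and the unsigned
  // addition cannot wrap; in the signed case the sign bit is additionally
  // excluded from the budget to stay conservative.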
  if (!Safe) {
    OpA = dyn_cast<Instruction>(ValA);
    if (!OpA)
      return false;
    KnownBits Known(BitWidth);
    computeKnownBits(OpA, Known, DL, 0, nullptr, OpA, &DT);
    APInt BitsAllowedToBeSet = Known.Zero.zext(IdxDiff.getBitWidth());
    if (Signed)
      BitsAllowedToBeSet.clearBit(BitWidth - 1);
    if (BitsAllowedToBeSet.ult(IdxDiff))
      return false;
  }

  const SCEV *OffsetSCEVA = SE.getSCEV(ValA);
  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
  const SCEV *C = SE.getConstant(IdxDiff.trunc(BitWidth));
  const SCEV *X = SE.getAddExpr(OffsetSCEVA, C);
  return X == OffsetSCEVB;
}

bool Vectorizer::lookThroughSelects(Value *PtrA, Value *PtrB,
                                    const APInt &PtrDelta,
                                    unsigned Depth) const {
  if (Depth++ == MaxDepth)
    return false;

  if (auto *SelectA = dyn_cast<SelectInst>(PtrA)) {
    if (auto *SelectB = dyn_cast<SelectInst>(PtrB)) {
      return SelectA->getCondition() == SelectB->getCondition() &&
             areConsecutivePointers(SelectA->getTrueValue(),
                                    SelectB->getTrueValue(), PtrDelta, Depth) &&
             areConsecutivePointers(SelectA->getFalseValue(),
                                    SelectB->getFalseValue(), PtrDelta, Depth);
    }
  }
  return false;
}

void Vectorizer::reorder(Instruction *I) {
  SmallPtrSet<Instruction *, 16> InstructionsToMove;
  SmallVector<Instruction *, 16> Worklist;

  Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *IW = Worklist.pop_back_val();
    int NumOperands = IW->getNumOperands();
    for (int i = 0; i < NumOperands; i++) {
      Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i));
      if (!IM || IM->getOpcode() == Instruction::PHI)
        continue;

      // If IM is in another BB, no need to move it, because this pass only
      // vectorizes instructions within one BB.
      if (IM->getParent() != I->getParent())
        continue;

      if (!IM->comesBefore(I)) {
        InstructionsToMove.insert(IM);
        Worklist.push_back(IM);
      }
    }
  }

  // All instructions to move should follow I. Start from I, not from begin().
  for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E;
       ++BBI) {
    if (!InstructionsToMove.count(&*BBI))
      continue;
    Instruction *IM = &*BBI;
    --BBI;
    IM->removeFromParent();
    IM->insertBefore(I);
  }
}

std::pair<BasicBlock::iterator, BasicBlock::iterator>
Vectorizer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
  Instruction *C0 = Chain[0];
  BasicBlock::iterator FirstInstr = C0->getIterator();
  BasicBlock::iterator LastInstr = C0->getIterator();

  BasicBlock *BB = C0->getParent();
  unsigned NumFound = 0;
  for (Instruction &I : *BB) {
    if (!is_contained(Chain, &I))
      continue;

    ++NumFound;
    if (NumFound == 1) {
      FirstInstr = I.getIterator();
    }
    if (NumFound == Chain.size()) {
      LastInstr = I.getIterator();
      break;
    }
  }

  // Range is [first, last).
  return std::make_pair(FirstInstr, ++LastInstr);
}

void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
  SmallVector<Instruction *, 16> Instrs;
  for (Instruction *I : Chain) {
    Value *PtrOperand = getLoadStorePointerOperand(I);
    assert(PtrOperand && "Instruction must have a pointer operand.");
    Instrs.push_back(I);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
      Instrs.push_back(GEP);
  }

  // Erase instructions.
  for (Instruction *I : Instrs)
    if (I->use_empty())
      I->eraseFromParent();
}

std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
Vectorizer::splitOddVectorElts(ArrayRef<Instruction *> Chain,
                               unsigned ElementSizeBits) {
  unsigned ElementSizeBytes = ElementSizeBits / 8;
  unsigned SizeBytes = ElementSizeBytes * Chain.size();
  unsigned NumLeft = (SizeBytes - (SizeBytes % 4)) / ElementSizeBytes;
  if (NumLeft == Chain.size()) {
    if ((NumLeft & 1) == 0)
      NumLeft /= 2; // Split even in half
    else
      --NumLeft;    // Split off last element
  } else if (NumLeft == 0)
    NumLeft = 1;
  return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
}

ArrayRef<Instruction *>
Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
  // These are in BB order, unlike Chain, which is in address order.
  SmallVector<Instruction *, 16> MemoryInstrs;
  SmallVector<Instruction *, 16> ChainInstrs;

  bool IsLoadChain = isa<LoadInst>(Chain[0]);
  LLVM_DEBUG({
    for (Instruction *I : Chain) {
      if (IsLoadChain)
        assert(isa<LoadInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
      else
        assert(isa<StoreInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
    }
  });

  for (Instruction &I : make_range(getBoundaryInstrs(Chain))) {
    if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
      if (!is_contained(Chain, &I))
        MemoryInstrs.push_back(&I);
      else
        ChainInstrs.push_back(&I);
    } else if (isa<IntrinsicInst>(&I) &&
               cast<IntrinsicInst>(&I)->getIntrinsicID() ==
                   Intrinsic::sideeffect) {
      // Ignore llvm.sideeffect calls.
    } else if (isa<IntrinsicInst>(&I) &&
               cast<IntrinsicInst>(&I)->getIntrinsicID() ==
                   Intrinsic::pseudoprobe) {
      // Ignore llvm.pseudoprobe calls.
    } else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) {
      LLVM_DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I
                        << '\n');
      break;
    } else if (!IsLoadChain && (I.mayReadOrWriteMemory() || I.mayThrow())) {
      LLVM_DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I
                        << '\n');
      break;
    }
  }

  // Loop until we find an instruction in ChainInstrs that we can't vectorize.
  unsigned ChainInstrIdx = 0;
  Instruction *BarrierMemoryInstr = nullptr;

  for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) {
    Instruction *ChainInstr = ChainInstrs[ChainInstrIdx];

    // If a barrier memory instruction was found, chain instructions that
    // follow will not be added to the valid prefix.
    if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(ChainInstr))
      break;

    // Check (in BB order) if any instruction prevents ChainInstr from being
    // vectorized. Find and store the first such "conflicting" instruction.
    for (Instruction *MemInstr : MemoryInstrs) {
      // If a barrier memory instruction was found, do not check past it.
      if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(MemInstr))
        break;

      auto *MemLoad = dyn_cast<LoadInst>(MemInstr);
      auto *ChainLoad = dyn_cast<LoadInst>(ChainInstr);
      if (MemLoad && ChainLoad)
        continue;

      // We can ignore the alias if we have a load/store pair and the load
      // is known to be invariant. The load cannot be clobbered by the store.
      auto IsInvariantLoad = [](const LoadInst *LI) -> bool {
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      };

      // We can ignore the alias as long as the load comes before the store,
      // because that means we won't be moving the load past the store to
      // vectorize it (the vectorized load is inserted at the location of the
      // first load in the chain).
      if (isa<StoreInst>(MemInstr) && ChainLoad &&
          (IsInvariantLoad(ChainLoad) || ChainLoad->comesBefore(MemInstr)))
        continue;

      // Same case, but in reverse.
      if (MemLoad && isa<StoreInst>(ChainInstr) &&
          (IsInvariantLoad(MemLoad) || MemLoad->comesBefore(ChainInstr)))
        continue;

      if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
                        MemoryLocation::get(ChainInstr))) {
        LLVM_DEBUG({
          dbgs() << "LSV: Found alias:\n"
                    "  Aliasing instruction and pointer:\n"
                 << "  " << *MemInstr << '\n'
                 << "  " << *getLoadStorePointerOperand(MemInstr) << '\n'
                 << "  Aliased instruction and pointer:\n"
                 << "  " << *ChainInstr << '\n'
                 << "  " << *getLoadStorePointerOperand(ChainInstr) << '\n';
        });
        // Save this aliasing memory instruction as a barrier, but allow other
        // instructions that precede the barrier to be vectorized with this
        // one.
        BarrierMemoryInstr = MemInstr;
        break;
      }
    }
    // Continue the search only for store chains, since vectorizing stores that
    // precede an aliasing load is valid. Conversely, vectorizing loads is
    // valid up to an aliasing store, but should not pull loads from further
    // down in the basic block.
    if (IsLoadChain && BarrierMemoryInstr) {
      // The BarrierMemoryInstr is a store that precedes ChainInstr.
      assert(BarrierMemoryInstr->comesBefore(ChainInstr));
      break;
    }
  }

  // Find the largest prefix of Chain whose elements are all in
  // ChainInstrs[0, ChainInstrIdx). This is the largest vectorizable prefix of
  // Chain. (Recall that Chain is in address order, but ChainInstrs is in BB
  // order.)
  SmallPtrSet<Instruction *, 8> VectorizableChainInstrs(
      ChainInstrs.begin(), ChainInstrs.begin() + ChainInstrIdx);
  unsigned ChainIdx = 0;
  for (unsigned ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
    if (!VectorizableChainInstrs.count(Chain[ChainIdx]))
      break;
  }
  return Chain.slice(0, ChainIdx);
}

static ChainID getChainID(const Value *Ptr) {
  const Value *ObjPtr = getUnderlyingObject(Ptr);
  if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
    // The selects themselves are distinct instructions even if they share the
    // same condition and evaluate to consecutive pointers for the true and
    // false values of the condition. Therefore using the selects themselves
    // for grouping instructions would put consecutive accesses into different
    // lists, and they wouldn't even be checked for being consecutive, and
    // wouldn't be vectorized.
    return Sel->getCondition();
  }
  return ObjPtr;
}

std::pair<InstrListMap, InstrListMap>
Vectorizer::collectInstructions(BasicBlock *BB) {
  InstrListMap LoadRefs;
  InstrListMap StoreRefs;

  for (Instruction &I : *BB) {
    if (!I.mayReadOrWriteMemory())
      continue;

    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      if (!LI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeLoad(LI))
        continue;

      Type *Ty = LI->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and do not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>).
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      Value *Ptr = LI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      unsigned VF = VecRegSize / TySize;
      VectorType *VecTy = dyn_cast<VectorType>(Ty);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2 ||
          (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
        continue;

      // Make sure all the users of a vector are constant-index extracts.
      if (isa<VectorType>(Ty) && !llvm::all_of(LI->users(), [](const User *U) {
            const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
            return EEI && isa<ConstantInt>(EEI->getOperand(1));
          }))
        continue;

      // Save the load locations.
      const ChainID ID = getChainID(Ptr);
      LoadRefs[ID].push_back(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeStore(SI))
        continue;

      Type *Ty = SI->getValueOperand()->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and do not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>).
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      Value *Ptr = SI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      unsigned VF = VecRegSize / TySize;
      VectorType *VecTy = dyn_cast<VectorType>(Ty);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2 ||
          (VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
        continue;

      // Make sure all the users of a vector are constant-index extracts.
      if (isa<VectorType>(Ty) && !llvm::all_of(SI->users(), [](const User *U) {
            const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
            return EEI && isa<ConstantInt>(EEI->getOperand(1));
          }))
        continue;

      // Save store location.
      const ChainID ID = getChainID(Ptr);
      StoreRefs[ID].push_back(SI);
    }
  }

  return {LoadRefs, StoreRefs};
}

bool Vectorizer::vectorizeChains(InstrListMap &Map) {
  bool Changed = false;

  for (const std::pair<ChainID, InstrList> &Chain : Map) {
    unsigned Size = Chain.second.size();
    if (Size < 2)
      continue;

    LLVM_DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");

    // Process the instructions in chunks of 64.
    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
      unsigned Len = std::min<unsigned>(CE - CI, 64);
      ArrayRef<Instruction *> Chunk(&Chain.second[CI], Len);
      Changed |= vectorizeInstructions(Chunk);
    }
  }

  return Changed;
}

bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) {
  LLVM_DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size()
                    << " instructions.\n");
  SmallVector<int, 16> Heads, Tails;
  int ConsecutiveChain[64];

  // Do a quadratic search on all of the given loads/stores and find all of the
  // pairs of loads/stores that follow each other.
  for (int i = 0, e = Instrs.size(); i < e; ++i) {
    ConsecutiveChain[i] = -1;
    for (int j = e - 1; j >= 0; --j) {
      if (i == j)
        continue;

      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
        if (ConsecutiveChain[i] != -1) {
          int CurDistance = std::abs(ConsecutiveChain[i] - i);
          int NewDistance = std::abs(ConsecutiveChain[i] - j);
          if (j < i || NewDistance > CurDistance)
            continue; // Should not insert.
        }

        Tails.push_back(j);
        Heads.push_back(i);
        ConsecutiveChain[i] = j;
      }
    }
  }

  bool Changed = false;
  SmallPtrSet<Instruction *, 16> InstructionsProcessed;

  for (int Head : Heads) {
    if (InstructionsProcessed.count(Instrs[Head]))
      continue;
    bool LongerChainExists = false;
    for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
      if (Head == Tails[TIt] &&
          !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
        LongerChainExists = true;
        break;
      }
    if (LongerChainExists)
      continue;

    // We found an instr that starts a chain. Now follow the chain and try to
    // vectorize it.
    SmallVector<Instruction *, 16> Operands;
    int I = Head;
    while (I != -1 && (is_contained(Tails, I) || is_contained(Heads, I))) {
      if (InstructionsProcessed.count(Instrs[I]))
        break;

      Operands.push_back(Instrs[I]);
      I = ConsecutiveChain[I];
    }

    bool Vectorized = false;
    if (isa<LoadInst>(*Operands.begin()))
      Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
    else
      Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);

    Changed |= Vectorized;
  }

  return Changed;
}

bool Vectorizer::vectorizeStoreChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  StoreInst *S0 = cast<StoreInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole store.
  Type *StoreTy = nullptr;
  for (Instruction *I : Chain) {
    StoreTy = cast<StoreInst>(I)->getValueOperand()->getType();
    if (StoreTy->isIntOrIntVectorTy())
      break;

    if (StoreTy->isPtrOrPtrVectorTy()) {
      StoreTy = Type::getIntNTy(F.getParent()->getContext(),
                                DL.getTypeSizeInBits(StoreTy));
      break;
    }
  }
  assert(StoreTy && "Failed to find store type");

  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
  unsigned AS = S0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  Align Alignment = S0->getAlign();

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller
    // chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;

  FixedVectorType *VecTy;
  auto *VecStoreTy = dyn_cast<FixedVectorType>(StoreTy);
  if (VecStoreTy)
    VecTy = FixedVectorType::get(StoreTy->getScalarType(),
                                 Chain.size() * VecStoreTy->getNumElements());
  else
    VecTy = FixedVectorType::get(StoreTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
                         " Creating two separate arrays.\n");
    return vectorizeStoreChain(Chain.slice(0, TargetVF),
                               InstructionsProcessed) |
           vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed);
  }

  LLVM_DEBUG({
    dbgs() << "LSV: Stores to vectorize:\n";
    for (Instruction *I : Chain)
      dbgs() << "  " << *I << "\n";
  });

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the store is going to be misaligned, don't vectorize it.
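  // More precisely: if the misaligned access is not in the alloca address
  // space, split the chain into smaller pieces and retry; for stack accesses,
  // try to prove or enforce a usable alignment (StackAdjustedAlignment)
  // instead, and give up if that fails.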
  if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
    if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
      auto Chains = splitOddVectorElts(Chain, Sz);
      return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
             vectorizeStoreChain(Chains.second, InstructionsProcessed);
    }

    Align NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
                                                Align(StackAdjustedAlignment),
                                                DL, S0, nullptr, &DT);
    if (NewAlign >= Alignment)
      Alignment = NewAlign;
    else
      return false;
  }

  if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
           vectorizeStoreChain(Chains.second, InstructionsProcessed);
  }

  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*Last);

  Value *Vec = UndefValue::get(VecTy);

  if (VecStoreTy) {
    unsigned VecWidth = VecStoreTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
        unsigned NewIdx = J + I * VecWidth;
        Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
                                                      Builder.getInt32(J));
        if (Extract->getType() != StoreTy->getScalarType())
          Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());

        Value *Insert =
            Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx));
        Vec = Insert;
      }
    }
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      Value *Extract = Store->getValueOperand();
      if (Extract->getType() != StoreTy->getScalarType())
        Extract =
            Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType());

      Value *Insert =
          Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I));
      Vec = Insert;
    }
  }

  StoreInst *SI = Builder.CreateAlignedStore(
      Vec,
      Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS)),
      Alignment);
  propagateMetadata(SI, Chain);

  eraseInstructions(Chain);
  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::vectorizeLoadChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  LoadInst *L0 = cast<LoadInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole load.
  Type *LoadTy = nullptr;
  for (const auto &V : Chain) {
    LoadTy = cast<LoadInst>(V)->getType();
    if (LoadTy->isIntOrIntVectorTy())
      break;

    if (LoadTy->isPtrOrPtrVectorTy()) {
      LoadTy = Type::getIntNTy(F.getParent()->getContext(),
                               DL.getTypeSizeInBits(LoadTy));
      break;
    }
  }
  assert(LoadTy && "Can't determine LoadInst type from chain");

  unsigned Sz = DL.getTypeSizeInBits(LoadTy);
  unsigned AS = L0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  Align Alignment = L0->getAlign();

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller
    // chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;
  VectorType *VecTy;
  auto *VecLoadTy = dyn_cast<FixedVectorType>(LoadTy);
  if (VecLoadTy)
    VecTy = FixedVectorType::get(LoadTy->getScalarType(),
                                 Chain.size() * VecLoadTy->getNumElements());
  else
    VecTy = FixedVectorType::get(LoadTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
                         " Creating two separate arrays.\n");
    return vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed) |
           vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed);
  }

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the load is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
    if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
      auto Chains = splitOddVectorElts(Chain, Sz);
      return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
             vectorizeLoadChain(Chains.second, InstructionsProcessed);
    }

    Align NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(),
                                                Align(StackAdjustedAlignment),
                                                DL, L0, nullptr, &DT);
    if (NewAlign >= Alignment)
      Alignment = NewAlign;
    else
      return false;
  }

  if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
           vectorizeLoadChain(Chains.second, InstructionsProcessed);
  }

  LLVM_DEBUG({
    dbgs() << "LSV: Loads to vectorize:\n";
    for (Instruction *I : Chain)
      I->dump();
  });

  // getVectorizablePrefix already computed getBoundaryInstrs. The value of
  // Last may have changed since then, but the value of First won't have. If
  // it matters, we could compute getBoundaryInstrs only once and reuse it
  // here.
  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*First);

  Value *Bitcast =
      Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
  LoadInst *LI =
      Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment));
  propagateMetadata(LI, Chain);

  if (VecLoadTy) {
    SmallVector<Instruction *, 16> InstrsToErase;

    unsigned VecWidth = VecLoadTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      for (auto Use : Chain[I]->users()) {
        // All users of vector loads are ExtractElement instructions with
        // constant indices, otherwise we would have bailed before now.
        Instruction *UI = cast<Instruction>(Use);
        unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
        unsigned NewIdx = Idx + I * VecWidth;
        Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx),
                                                UI->getName());
        if (V->getType() != UI->getType())
          V = Builder.CreateBitCast(V, UI->getType());

        // Replace the old instruction.
        UI->replaceAllUsesWith(V);
        InstrsToErase.push_back(UI);
      }
    }

    // Bitcast might not be an Instruction, if the value being loaded is a
    // constant. In that case, no need to reorder anything.
    if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
      reorder(BitcastInst);

    for (auto I : InstrsToErase)
      I->eraseFromParent();
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      Value *CV = Chain[I];
      Value *V =
          Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
      if (V->getType() != CV->getType()) {
        V = Builder.CreateBitOrPointerCast(V, CV->getType());
      }

      // Replace the old instruction.
      CV->replaceAllUsesWith(V);
    }

    if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
      reorder(BitcastInst);
  }

  eraseInstructions(Chain);

  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                                    unsigned Alignment) {
  if (Alignment % SzInBytes == 0)
    return false;

  bool Fast = false;
  bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
                                                   SzInBytes * 8, AddressSpace,
                                                   Alignment, &Fast);
  LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
                    << " and fast? " << Fast << "\n";);
  return !Allows || !Fast;
}