//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"
#include <limits>
#include <numeric>
#include <queue>
#include <set>

#define DEBUG_TYPE "vector-combine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumVecLoad, "Number of vector loads formed");
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

static cl::opt<unsigned> MaxInstrsToScan(
    "vector-combine-max-scan-instrs", cl::init(30), cl::Hidden,
    cl::desc("Max number of instructions to scan for vector combining."));

static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();

namespace {
class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT, AAResults &AA, AssumptionCache &AC,
                bool ScalarizationOnly)
      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC),
        ScalarizationOnly(ScalarizationOnly) {}

  bool run();

private:
  Function &F;
  IRBuilder<> Builder;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;
  AAResults &AA;
  AssumptionCache &AC;

  /// If true, only perform scalarization combines and do not introduce new
  /// vector operations.
  bool ScalarizationOnly;

  InstructionWorklist Worklist;

  bool vectorizeLoadInsert(Instruction &I);
  ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
                                        ExtractElementInst *Ext1,
                                        unsigned PreferredExtractIndex) const;
  bool isExtractExtractCheap(ExtractElementInst *Ext0,
                             ExtractElementInst *Ext1, const Instruction &I,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                     Instruction &I);
  void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                       Instruction &I);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
  bool foldExtractedCmps(Instruction &I);
  bool foldSingleElementStore(Instruction &I);
  bool scalarizeLoadExtract(Instruction &I);
  bool foldShuffleOfBinops(Instruction &I);
  bool foldShuffleFromReductions(Instruction &I);
  bool foldSelectShuffle(Instruction &I, bool FromReduction = false);

  void replaceValue(Value &Old, Value &New) {
    Old.replaceAllUsesWith(&New);
    if (auto *NewI = dyn_cast<Instruction>(&New)) {
      New.takeName(&Old);
      Worklist.pushUsersToWorkList(*NewI);
      Worklist.pushValue(NewI);
    }
    Worklist.pushValue(&Old);
  }

  void eraseInstruction(Instruction &I) {
    for (Value *Op : I.operands())
      Worklist.pushValue(Op);
    Worklist.remove(&I);
    I.eraseFromParent();
  }
};
} // namespace

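// Illustrative example of the load-widening transform below (IR names and the
// <4 x float> width are hypothetical; the actual width comes from the target's
// minimum vector register size):
//   %s = load float, float* %p, align 4
//   %r = insertelement <4 x float> undef, float %s, i32 0
// -->
//   %v = load <4 x float>, <4 x float>* %p.cast, align 4
//   %r = shufflevector <4 x float> %v, <4 x float> poison,
//                      <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
// The transform only fires when the wider load is known dereferenceable and
// the cost model does not report the vector form as more expensive.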
bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
  // Match insert into fixed vector of scalar value.
  // TODO: Handle non-zero insert index.
  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
  Value *Scalar;
  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
      !Scalar->hasOneUse())
    return false;

  // Optionally match an extract from another vector.
  Value *X;
  bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
  if (!HasExtract)
    X = Scalar;

  // Match source value as load of scalar or vector.
  // Do not vectorize scalar load (widening) if atomic/volatile or under
  // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
  // or create data races non-existent in the source.
  auto *Load = dyn_cast<LoadInst>(X);
  if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
      Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
      mustSuppressSpeculation(*Load))
    return false;

  const DataLayout &DL = I.getModule()->getDataLayout();
  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

  unsigned AS = Load->getPointerAddressSpace();

  // We are potentially transforming byte-sized (8-bit) memory accesses, so make
  // sure we have all of our type-based constraints in place for this target.
  Type *ScalarTy = Scalar->getType();
  uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
      ScalarSize % 8 != 0)
    return false;

  // Check safety of replacing the scalar load with a larger vector load.
  // We use minimal alignment (maximum flexibility) because we only care about
  // the dereferenceable region. When calculating cost and creating a new op,
  // we may use a larger value based on alignment attributes.
  unsigned MinVecNumElts = MinVectorSize / ScalarSize;
  auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
  unsigned OffsetEltIndex = 0;
  Align Alignment = Load->getAlign();
  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
    // It is not safe to load directly from the pointer, but we can still peek
    // through gep offsets and check if it is safe to load from a base address
    // with updated alignment. If it is, we can shuffle the element(s) into
    // place after loading.
    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
    APInt Offset(OffsetBitWidth, 0);
    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    // We want to shuffle the result down from a high element of a vector, so
    // the offset must be positive.
    if (Offset.isNegative())
      return false;

    // The offset must be a multiple of the scalar element to shuffle cleanly
    // in the element's size.
    uint64_t ScalarSizeInBytes = ScalarSize / 8;
    if (Offset.urem(ScalarSizeInBytes) != 0)
      return false;

    // If we load MinVecNumElts, will our target element still be loaded?
    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
    if (OffsetEltIndex >= MinVecNumElts)
      return false;

    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
      return false;

    // Update alignment with offset value. Note that the offset could be negated
    // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
    // negation does not change the result of the alignment calculation.
    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
  }

  // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
  // Use the greater of the alignment on the load or its source pointer.
  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
  Type *LoadTy = Load->getType();
  InstructionCost OldCost =
      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
  APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
  OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                          /* Insert */ true, HasExtract);

  // New pattern: load VecPtr
  InstructionCost NewCost =
      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
  // Optionally, we are shuffling the loaded vector element(s) into place.
  // For the mask set everything but element 0 to undef to prevent poison from
  // propagating from the extra loaded memory. This will also optionally
  // shrink/grow the vector from the loaded size to the output size.
  // We assume this operation has no cost in codegen if there was no offset.
  // Note that we could use freeze to avoid poison problems, but then we might
  // still need a shuffle to change the vector size.
  unsigned OutputNumElts = Ty->getNumElements();
  SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
  Mask[0] = OffsetEltIndex;
  if (OffsetEltIndex)
    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy, Mask);

  // We can aggressively convert to the vector form because the backend can
  // invert this transform if it does not result in a performance win.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // It is safe and potentially profitable to load a vector directly:
  // inselt undef, load Scalar, 0 --> load VecPtr
  IRBuilder<> Builder(Load);
  Value *CastedPtr = Builder.CreatePointerBitCastOrAddrSpaceCast(
      SrcPtr, MinVecTy->getPointerTo(AS));
  Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
  VecLd = Builder.CreateShuffleVector(VecLd, Mask);

  replaceValue(I, *VecLd);
  ++NumVecLoad;
  return true;
}

/// Determine which, if any, of the inputs should be replaced by a shuffle
/// followed by extract from a different index.
ExtractElementInst *VectorCombine::getShuffleExtract(
    ExtractElementInst *Ext0, ExtractElementInst *Ext1,
    unsigned PreferredExtractIndex = InvalidIndex) const {
  auto *Index0C = dyn_cast<ConstantInt>(Ext0->getIndexOperand());
  auto *Index1C = dyn_cast<ConstantInt>(Ext1->getIndexOperand());
  assert(Index0C && Index1C && "Expected constant extract indexes");

  unsigned Index0 = Index0C->getZExtValue();
  unsigned Index1 = Index1C->getZExtValue();

  // If the extract indexes are identical, no shuffle is needed.
  if (Index0 == Index1)
    return nullptr;

  Type *VecTy = Ext0->getVectorOperand()->getType();
  assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
  InstructionCost Cost0 =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  InstructionCost Cost1 =
      TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);

  // If both costs are invalid, no shuffle is needed.
  if (!Cost0.isValid() && !Cost1.isValid())
    return nullptr;

  // We are extracting from 2 different indexes, so one operand must be shuffled
  // before performing a vector operation and/or extract. The more expensive
  // extract will be replaced by a shuffle.
  if (Cost0 > Cost1)
    return Ext0;
  if (Cost1 > Cost0)
    return Ext1;

  // If the costs are equal and there is a preferred extract index, shuffle the
  // opposite operand.
  if (PreferredExtractIndex == Index0)
    return Ext1;
  if (PreferredExtractIndex == Index1)
    return Ext0;

  // Otherwise, replace the extract with the higher index.
  return Index0 > Index1 ? Ext0 : Ext1;
}

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          const Instruction &I,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  auto *Ext0IndexC = dyn_cast<ConstantInt>(Ext0->getOperand(1));
  auto *Ext1IndexC = dyn_cast<ConstantInt>(Ext1->getOperand(1));
  assert(Ext0IndexC && Ext1IndexC && "Expected constant extract indexes");

  unsigned Opcode = I.getOpcode();
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  InstructionCost ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = Ext0IndexC->getZExtValue();
  unsigned Ext1Index = Ext1IndexC->getZExtValue();

  InstructionCost Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  InstructionCost Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  InstructionCost OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
  if (ConvertToShuffle) {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Create a shuffle that translates (shifts) 1 element from the input vector
/// to a new element location.
static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
                                 unsigned NewIndex, IRBuilder<> &Builder) {
  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(Vec->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[NewIndex] = OldIndex;
  return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not creating
/// unnecessary instructions.
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex,
                                            IRBuilder<> &Builder) {
  // Shufflevectors can only be created for fixed-width vectors.
  if (!isa<FixedVectorType>(ExtElt->getOperand(0)->getType()))
    return nullptr;

  // If the extract can be constant-folded, this code is unsimplified. Defer
  // to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  if (isa<Constant>(X))
    return nullptr;

  Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
                                   NewIndex, Builder);
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
                                  ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
                                    ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

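// Illustrative example of the extract/extract fold handled below (IR names are
// hypothetical):
//   %e0 = extractelement <4 x i32> %v0, i32 2
//   %e1 = extractelement <4 x i32> %v1, i32 2
//   %r  = add i32 %e0, %e1
// -->
//   %vadd = add <4 x i32> %v0, %v1
//   %r    = extractelement <4 x i32> %vadd, i32 2
// If the two extract indexes differ, one source is first shuffled so both
// extracts read the same lane; the cost model decides whether that pays off.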
/// Match an instruction with extracted vector operands.
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = InvalidIndex;
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I, ExtractToChange, InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx, Builder);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  Worklist.push(Ext0);
  Worklist.push(Ext1);
  return true;
}

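// Illustrative example of the bitcast-of-shuffle canonicalization below (IR
// names are hypothetical):
//   %s = shufflevector <4 x i32> %v, <4 x i32> undef,
//                      <4 x i32> <i32 1, i32 0, i32 3, i32 2>
//   %b = bitcast <4 x i32> %s to <8 x i16>
// -->
//   %b = bitcast <4 x i32> %v to <8 x i16>
//   %s = shufflevector <8 x i16> %b, <8 x i16> poison,
//                      <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
// The mask is narrowed (or widened) to match the new element size, and the
// rewrite only happens when the new shuffle is not more expensive.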
/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // 1) Do not fold bitcast shuffle for scalable type. First, shuffle cost for
  //    scalable type is unknown; Second, we cannot reason if the narrowed
  //    shuffle mask for scalable type is a splat or not.
  // 2) Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
  auto *SrcTy = dyn_cast<FixedVectorType>(V->getType());
  if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  InstructionCost DestCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, DestTy, NewMask);
  InstructionCost SrcCost =
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy, Mask);
  if (DestCost > SrcCost || !DestCost.isValid())
    return false;

  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask);
  replaceValue(I, *Shuf);
  return true;
}

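// Illustrative example of the scalarization performed below (IR names are
// hypothetical):
//   %i = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 %x, i32 1
//   %r = add <4 x i32> %i, <i32 10, i32 20, i32 30, i32 40>
// -->
//   %s = add i32 %x, 20
//   %r = insertelement <4 x i32> <i32 11, i32 22, i32 33, i32 44>, i32 %s, i32 1
// The vector constants fold together, and only the inserted lane is computed
// as a scalar operation.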
/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail for single insertion if it is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  InstructionCost ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  InstructionCost InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  InstructionCost OldCost =
      (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) + VectorOpCost;
  InstructionCost NewCost = ScalarOpCost + InsertCost +
                            (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                            (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element; this should constant fold.
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Value *NewVecC =
      IsCmp ? Builder.CreateCmp(Pred, VecC0, VecC1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  replaceValue(I, *Insert);
  return true;
}

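// Illustrative example of the compare + binop fold handled below, assuming
// lane 0 is the cheaper extract (IR names are hypothetical):
//   %e0 = extractelement <4 x i32> %x, i32 0
//   %e1 = extractelement <4 x i32> %x, i32 3
//   %c0 = icmp sgt i32 %e0, 42
//   %c1 = icmp sgt i32 %e1, 7
//   %r  = and i1 %c0, %c1
// -->
//   %vc = icmp sgt <4 x i32> %x, <i32 42, i32 undef, i32 undef, i32 7>
//   %sh = shufflevector <4 x i1> %vc, <4 x i1> poison,
//                       <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef>
//   %vr = and <4 x i1> %vc, %sh
//   %r  = extractelement <4 x i1> %vr, i32 0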
/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
/// a vector into vector operations followed by extract. Note: The SLP pass
/// may miss this pattern because of implementation problems.
bool VectorCombine::foldExtractedCmps(Instruction &I) {
  // We are looking for a scalar binop of booleans.
  // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
  if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
    return false;

  // The compare predicates should match, and each compare should have a
  // constant operand.
  // TODO: Relax the one-use constraints.
  Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
  Instruction *I0, *I1;
  Constant *C0, *C1;
  CmpInst::Predicate P0, P1;
  if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
      !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
      P0 != P1)
    return false;

  // The compare operands must be extracts of the same vector with constant
  // extract indexes.
  // TODO: Relax the one-use constraints.
  Value *X;
  uint64_t Index0, Index1;
  if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
      !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
    return false;

  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
  if (!ConvertToShuf)
    return false;

  // The original scalar pattern is:
  // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
  CmpInst::Predicate Pred = P0;
  unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
                                                    : Instruction::ICmp;
  auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
  if (!VecTy)
    return false;

  InstructionCost OldCost =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
  OldCost +=
      TTI.getCmpSelInstrCost(CmpOpcode, I0->getType(),
                             CmpInst::makeCmpResultType(I0->getType()), Pred) *
      2;
  OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());

  // The proposed vector pattern is:
  // vcmp = cmp Pred X, VecC
  // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
  int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
  int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
  auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
  InstructionCost NewCost = TTI.getCmpSelInstrCost(
      CmpOpcode, X->getType(), CmpInst::makeCmpResultType(X->getType()), Pred);
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[CheapIndex] = ExpensiveIndex;
  NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy,
                                ShufMask);
  NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
  NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);

  // Aggressively form vector ops if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // Create a vector constant from the 2 scalar constants.
  SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
                                   UndefValue::get(VecTy->getElementType()));
  CmpC[Index0] = C0;
  CmpC[Index1] = C1;
  Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));

  Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
  Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                        VCmp, Shuf);
  Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
  replaceValue(I, *NewExt);
  ++NumVecCmpBO;
  return true;
}

// Check if memory loc is modified between two instrs in the same BB.
static bool isMemModifiedBetween(BasicBlock::iterator Begin,
                                 BasicBlock::iterator End,
                                 const MemoryLocation &Loc, AAResults &AA) {
  unsigned NumScanned = 0;
  return std::any_of(Begin, End, [&](const Instruction &Instr) {
    return isModSet(AA.getModRefInfo(&Instr, Loc)) ||
           ++NumScanned > MaxInstrsToScan;
  });
}

/// Helper class to indicate whether a vector index can be safely scalarized and
/// if a freeze needs to be inserted.
class ScalarizationResult {
  enum class StatusTy { Unsafe, Safe, SafeWithFreeze };

  StatusTy Status;
  Value *ToFreeze;

  ScalarizationResult(StatusTy Status, Value *ToFreeze = nullptr)
      : Status(Status), ToFreeze(ToFreeze) {}

public:
  ScalarizationResult(const ScalarizationResult &Other) = default;
  ~ScalarizationResult() {
    assert(!ToFreeze && "freeze() not called with ToFreeze being set");
  }

  static ScalarizationResult unsafe() { return {StatusTy::Unsafe}; }
  static ScalarizationResult safe() { return {StatusTy::Safe}; }
  static ScalarizationResult safeWithFreeze(Value *ToFreeze) {
    return {StatusTy::SafeWithFreeze, ToFreeze};
  }

  /// Returns true if the index can be scalarized without requiring a freeze.
  bool isSafe() const { return Status == StatusTy::Safe; }
  /// Returns true if the index cannot be scalarized.
  bool isUnsafe() const { return Status == StatusTy::Unsafe; }
  /// Returns true if the index can be scalarized, but requires inserting a
  /// freeze.
  bool isSafeWithFreeze() const { return Status == StatusTy::SafeWithFreeze; }

  /// Reset the state to Unsafe and clear ToFreeze if set.
  void discard() {
    ToFreeze = nullptr;
    Status = StatusTy::Unsafe;
  }

  /// Freeze ToFreeze and update the use in \p UserI to use the frozen value.
  void freeze(IRBuilder<> &Builder, Instruction &UserI) {
    assert(isSafeWithFreeze() &&
           "should only be used when freezing is required");
    assert(is_contained(ToFreeze->users(), &UserI) &&
           "UserI must be a user of ToFreeze");
    IRBuilder<>::InsertPointGuard Guard(Builder);
    Builder.SetInsertPoint(cast<Instruction>(&UserI));
    Value *Frozen =
        Builder.CreateFreeze(ToFreeze, ToFreeze->getName() + ".frozen");
    for (Use &U : make_early_inc_range((UserI.operands())))
      if (U.get() == ToFreeze)
        U.set(Frozen);

    ToFreeze = nullptr;
  }
};

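// Illustrative example for the check below (names are hypothetical): for a
// <4 x i32> vector, an index computed as
//   %idx.clamped = and i64 %idx, 3
// is always in bounds, but %idx itself may be poison; in that case the access
// is reported as safe-with-freeze so that %idx can be frozen before use.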
/// Check if it is legal to scalarize a memory access to \p VecTy at index \p
/// Idx. \p Idx must access a valid vector element.
static ScalarizationResult canScalarizeAccess(FixedVectorType *VecTy,
                                              Value *Idx, Instruction *CtxI,
                                              AssumptionCache &AC,
                                              const DominatorTree &DT) {
  if (auto *C = dyn_cast<ConstantInt>(Idx)) {
    if (C->getValue().ult(VecTy->getNumElements()))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  unsigned IntWidth = Idx->getType()->getScalarSizeInBits();
  APInt Zero(IntWidth, 0);
  APInt MaxElts(IntWidth, VecTy->getNumElements());
  ConstantRange ValidIndices(Zero, MaxElts);
  ConstantRange IdxRange(IntWidth, true);

  if (isGuaranteedNotToBePoison(Idx, &AC)) {
    if (ValidIndices.contains(computeConstantRange(Idx, /* ForSigned */ false,
                                                   true, &AC, CtxI, &DT)))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  // If the index may be poison, check if we can insert a freeze before the
  // range of the index is restricted.
  Value *IdxBase;
  ConstantInt *CI;
  if (match(Idx, m_And(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.binaryAnd(CI->getValue());
  } else if (match(Idx, m_URem(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.urem(CI->getValue());
  }

  if (ValidIndices.contains(IdxRange))
    return ScalarizationResult::safeWithFreeze(IdxBase);
  return ScalarizationResult::unsafe();
}

/// The memory operation on a vector of \p ScalarType had alignment of
/// \p VectorAlignment. Compute the maximal, but conservatively correct,
/// alignment that will be valid for the memory operation on a single scalar
/// element of the same type with index \p Idx.
static Align computeAlignmentAfterScalarization(Align VectorAlignment,
                                                Type *ScalarType, Value *Idx,
                                                const DataLayout &DL) {
  if (auto *C = dyn_cast<ConstantInt>(Idx))
    return commonAlignment(VectorAlignment,
                           C->getZExtValue() * DL.getTypeStoreSize(ScalarType));
  return commonAlignment(VectorAlignment, DL.getTypeStoreSize(ScalarType));
}

// Combine patterns like:
//   %0 = load <4 x i32>, <4 x i32>* %a
//   %1 = insertelement <4 x i32> %0, i32 %b, i32 1
//   store <4 x i32> %1, <4 x i32>* %a
// to:
//   %0 = bitcast <4 x i32>* %a to i32*
//   %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
//   store i32 %b, i32* %1
bool VectorCombine::foldSingleElementStore(Instruction &I) {
  StoreInst *SI = dyn_cast<StoreInst>(&I);
  if (!SI || !SI->isSimple() ||
      !isa<FixedVectorType>(SI->getValueOperand()->getType()))
    return false;

  // TODO: Combine more complicated patterns (multiple insert) by referencing
  //       TargetTransformInfo.
  Instruction *Source;
  Value *NewElement;
  Value *Idx;
  if (!match(SI->getValueOperand(),
             m_InsertElt(m_Instruction(Source), m_Value(NewElement),
                         m_Value(Idx))))
    return false;

  if (auto *Load = dyn_cast<LoadInst>(Source)) {
    auto VecTy = cast<FixedVectorType>(SI->getValueOperand()->getType());
    const DataLayout &DL = I.getModule()->getDataLayout();
    Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
    // Don't optimize for atomic/volatile load or store. Ensure memory is not
    // modified between, vector type matches store size, and index is inbounds.
    if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
        !DL.typeSizeEqualsStoreSize(Load->getType()) ||
        SrcAddr != SI->getPointerOperand()->stripPointerCasts())
      return false;

    auto ScalarizableIdx = canScalarizeAccess(VecTy, Idx, Load, AC, DT);
    if (ScalarizableIdx.isUnsafe() ||
        isMemModifiedBetween(Load->getIterator(), SI->getIterator(),
                             MemoryLocation::get(SI), AA))
      return false;

    if (ScalarizableIdx.isSafeWithFreeze())
      ScalarizableIdx.freeze(Builder, *cast<Instruction>(Idx));
    Value *GEP = Builder.CreateInBoundsGEP(
        SI->getValueOperand()->getType(), SI->getPointerOperand(),
        {ConstantInt::get(Idx->getType(), 0), Idx});
    StoreInst *NSI = Builder.CreateStore(NewElement, GEP);
    NSI->copyMetadata(*SI);
    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx,
        DL);
    NSI->setAlignment(ScalarOpAlignment);
    replaceValue(I, *NSI);
    eraseInstruction(I);
    return true;
  }

  return false;
}

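// Illustrative example of the load scalarization performed below (IR names are
// hypothetical):
//   %v  = load <4 x i32>, <4 x i32>* %p
//   %e0 = extractelement <4 x i32> %v, i64 0
//   %e2 = extractelement <4 x i32> %v, i64 2
// -->
//   %g0 = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 0, i64 0
//   %e0 = load i32, i32* %g0
//   %g2 = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 0, i64 2
//   %e2 = load i32, i32* %g2
// This only applies when every user of the wide load is such an extract and
// the scalar loads are cheaper according to the cost model.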
/// Try to scalarize vector loads feeding extractelement instructions.
bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
  Value *Ptr;
  if (!match(&I, m_Load(m_Value(Ptr))))
    return false;

  auto *LI = cast<LoadInst>(&I);
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (LI->isVolatile() || !DL.typeSizeEqualsStoreSize(LI->getType()))
    return false;

  auto *FixedVT = dyn_cast<FixedVectorType>(LI->getType());
  if (!FixedVT)
    return false;

  InstructionCost OriginalCost =
      TTI.getMemoryOpCost(Instruction::Load, LI->getType(), LI->getAlign(),
                          LI->getPointerAddressSpace());
  InstructionCost ScalarizedCost = 0;

  Instruction *LastCheckedInst = LI;
  unsigned NumInstChecked = 0;
  // Check if all users of the load are extracts with no memory modifications
  // between the load and the extract. Compute the cost of both the original
  // code and the scalarized version.
  for (User *U : LI->users()) {
    auto *UI = dyn_cast<ExtractElementInst>(U);
    if (!UI || UI->getParent() != LI->getParent())
      return false;

    if (!isGuaranteedNotToBePoison(UI->getOperand(1), &AC, LI, &DT))
      return false;

    // Check if any instruction between the load and the extract may modify
    // memory.
    if (LastCheckedInst->comesBefore(UI)) {
      for (Instruction &I :
           make_range(std::next(LI->getIterator()), UI->getIterator())) {
        // Bail out if we reached the check limit or the instruction may write
        // to memory.
        if (NumInstChecked == MaxInstrsToScan || I.mayWriteToMemory())
          return false;
        NumInstChecked++;
      }
      LastCheckedInst = UI;
    }

    auto ScalarIdx = canScalarizeAccess(FixedVT, UI->getOperand(1), &I, AC, DT);
    if (!ScalarIdx.isSafe()) {
      // TODO: Freeze index if it is safe to do so.
      ScalarIdx.discard();
      return false;
    }

    auto *Index = dyn_cast<ConstantInt>(UI->getOperand(1));
    OriginalCost +=
        TTI.getVectorInstrCost(Instruction::ExtractElement, LI->getType(),
                               Index ? Index->getZExtValue() : -1);
    ScalarizedCost +=
        TTI.getMemoryOpCost(Instruction::Load, FixedVT->getElementType(),
                            Align(1), LI->getPointerAddressSpace());
    ScalarizedCost += TTI.getAddressComputationCost(FixedVT->getElementType());
  }

  if (ScalarizedCost >= OriginalCost)
    return false;

  // Replace extracts with narrow scalar loads.
  for (User *U : LI->users()) {
    auto *EI = cast<ExtractElementInst>(U);
    Builder.SetInsertPoint(EI);

    Value *Idx = EI->getOperand(1);
    Value *GEP =
        Builder.CreateInBoundsGEP(FixedVT, Ptr, {Builder.getInt32(0), Idx});
    auto *NewLoad = cast<LoadInst>(Builder.CreateLoad(
        FixedVT->getElementType(), GEP, EI->getName() + ".scalar"));

    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        LI->getAlign(), FixedVT->getElementType(), Idx, DL);
    NewLoad->setAlignment(ScalarOpAlignment);

    replaceValue(*EI, *NewLoad);
  }

  return true;
}

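// Illustrative example of the shuffle-of-binops fold handled below (IR names
// are hypothetical; both binops share the operand %x):
//   %a = add <4 x i32> %x, %y
//   %b = add <4 x i32> %x, %w
//   %r = shufflevector <4 x i32> %a, <4 x i32> %b,
//                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
// -->
//   %s0 = shufflevector <4 x i32> %x, <4 x i32> poison,
//                       <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %s1 = shufflevector <4 x i32> %y, <4 x i32> %w,
//                       <4 x i32> <i32 0, i32 5, i32 2, i32 7>
//   %r  = add <4 x i32> %s0, %s1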
/// Try to convert "shuffle (binop), (binop)" with a shared binop operand into
/// "binop (shuffle), (shuffle)".
bool VectorCombine::foldShuffleOfBinops(Instruction &I) {
  auto *VecTy = dyn_cast<FixedVectorType>(I.getType());
  if (!VecTy)
    return false;

  BinaryOperator *B0, *B1;
  ArrayRef<int> Mask;
  if (!match(&I, m_Shuffle(m_OneUse(m_BinOp(B0)), m_OneUse(m_BinOp(B1)),
                           m_Mask(Mask))) ||
      B0->getOpcode() != B1->getOpcode() || B0->getType() != VecTy)
    return false;

  // Try to replace a binop with a shuffle if the shuffle is not costly.
  // The new shuffle will choose from a single, common operand, so it may be
  // cheaper than the existing two-operand shuffle.
  SmallVector<int> UnaryMask = createUnaryMask(Mask, Mask.size());
  Instruction::BinaryOps Opcode = B0->getOpcode();
  InstructionCost BinopCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  InstructionCost ShufCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, VecTy, UnaryMask);
  if (ShufCost > BinopCost)
    return false;

  // If we have something like "add X, Y" and "add Z, X", swap ops to match.
  Value *X = B0->getOperand(0), *Y = B0->getOperand(1);
  Value *Z = B1->getOperand(0), *W = B1->getOperand(1);
  if (BinaryOperator::isCommutative(Opcode) && X != Z && Y != W)
    std::swap(X, Y);

  Value *Shuf0, *Shuf1;
  if (X == Z) {
    // shuf (bo X, Y), (bo X, W) --> bo (shuf X), (shuf Y, W)
    Shuf0 = Builder.CreateShuffleVector(X, UnaryMask);
    Shuf1 = Builder.CreateShuffleVector(Y, W, Mask);
  } else if (Y == W) {
    // shuf (bo X, Y), (bo Z, Y) --> bo (shuf X, Z), (shuf Y)
    Shuf0 = Builder.CreateShuffleVector(X, Z, Mask);
    Shuf1 = Builder.CreateShuffleVector(Y, UnaryMask);
  } else {
    return false;
  }

  Value *NewBO = Builder.CreateBinOp(Opcode, Shuf0, Shuf1);
  // Intersect flags from the old binops.
  if (auto *NewInst = dyn_cast<Instruction>(NewBO)) {
    NewInst->copyIRFlags(B0);
    NewInst->andIRFlags(B1);
  }
  replaceValue(I, *NewBO);
  return true;
}

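// Illustrative example of the reduction/shuffle simplification below (IR names
// are hypothetical):
//   %s = shufflevector <4 x i32> %v, <4 x i32> poison,
//                      <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
// Because the reduction is commutative, the lane order does not matter, so the
// mask can be rewritten to a sorted (often identity) mask and the shuffle may
// then fold away entirely.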
/// Given a commutative reduction, the order of the input lanes does not alter
/// the results. We can use this to remove certain shuffles feeding the
/// reduction, removing the need to shuffle at all.
bool VectorCombine::foldShuffleFromReductions(Instruction &I) {
  auto *II = dyn_cast<IntrinsicInst>(&I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    break;
  default:
    return false;
  }

  // Find all the inputs when looking through operations that do not alter the
  // lane order (binops, for example). Currently we look for a single shuffle,
  // and can ignore splat values.
  std::queue<Value *> Worklist;
  SmallPtrSet<Value *, 4> Visited;
  ShuffleVectorInst *Shuffle = nullptr;
  if (auto *Op = dyn_cast<Instruction>(I.getOperand(0)))
    Worklist.push(Op);

  while (!Worklist.empty()) {
    Value *CV = Worklist.front();
    Worklist.pop();
    if (Visited.contains(CV))
      continue;

    // Splats don't change the order, so can be safely ignored.
    if (isSplatValue(CV))
      continue;

    Visited.insert(CV);

    if (auto *CI = dyn_cast<Instruction>(CV)) {
      if (CI->isBinaryOp()) {
        for (auto *Op : CI->operand_values())
          Worklist.push(Op);
        continue;
      } else if (auto *SV = dyn_cast<ShuffleVectorInst>(CI)) {
        if (Shuffle && Shuffle != SV)
          return false;
        Shuffle = SV;
        continue;
      }
    }

    // Anything else is currently an unknown node.
    return false;
  }

  if (!Shuffle)
    return false;

  // Check all uses of the binary ops and shuffles are also included in the
  // lane-invariant operations (Visited should be the list of lanewise
  // instructions, including the shuffle that we found).
  for (auto *V : Visited)
    for (auto *U : V->users())
      if (!Visited.contains(U) && U != &I)
        return false;

  FixedVectorType *VecType =
      dyn_cast<FixedVectorType>(II->getOperand(0)->getType());
  if (!VecType)
    return false;
  FixedVectorType *ShuffleInputType =
      dyn_cast<FixedVectorType>(Shuffle->getOperand(0)->getType());
  if (!ShuffleInputType)
    return false;
  int NumInputElts = ShuffleInputType->getNumElements();

  // Find the mask from sorting the lanes into order. This is most likely to
  // become an identity or concat mask. Undef elements are pushed to the end.
  SmallVector<int> ConcatMask;
  Shuffle->getShuffleMask(ConcatMask);
  sort(ConcatMask, [](int X, int Y) { return (unsigned)X < (unsigned)Y; });
  bool UsesSecondVec =
      any_of(ConcatMask, [&](int M) { return M >= NumInputElts; });
  InstructionCost OldCost = TTI.getShuffleCost(
      UsesSecondVec ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc, VecType,
      Shuffle->getShuffleMask());
  InstructionCost NewCost = TTI.getShuffleCost(
      UsesSecondVec ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc, VecType,
      ConcatMask);

  LLVM_DEBUG(dbgs() << "Found a reduction feeding from a shuffle: " << *Shuffle
                    << "\n");
  LLVM_DEBUG(dbgs() << "  OldCost: " << OldCost << " vs NewCost: " << NewCost
                    << "\n");
  if (NewCost < OldCost) {
    Builder.SetInsertPoint(Shuffle);
    Value *NewShuffle = Builder.CreateShuffleVector(
        Shuffle->getOperand(0), Shuffle->getOperand(1), ConcatMask);
    LLVM_DEBUG(dbgs() << "Created new shuffle: " << *NewShuffle << "\n");
    replaceValue(*Shuffle, *NewShuffle);
  }

  // See if we can re-use foldSelectShuffle, getting it to reduce the size of
  // the shuffle into a nicer order, as it can ignore the order of the shuffles.
  return foldSelectShuffle(*Shuffle, true);
}

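// Illustrative example of the select-shuffle pattern handled below, assuming
// <4 x i32> operands and hypothetical IR names:
//   %x = shufflevector <4 x i32> %p, <4 x i32> %q,
//                      <4 x i32> <i32 0, i32 1, i32 4, i32 5>
//   %y = shufflevector <4 x i32> %p, <4 x i32> %q,
//                      <4 x i32> <i32 2, i32 3, i32 6, i32 7>
//   %a = add <4 x i32> %x, %y
//   %b = mul <4 x i32> %x, %y
//   %r = shufflevector <4 x i32> %a, <4 x i32> %b,
//                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
// Only two lanes of %a and two lanes of %b are live, so the binops may be
// narrowed to packed half-width vectors (with cheaper input shuffles and a
// final shuffle to restore lane order) if the cost model finds it profitable.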
/// This method looks for groups of shuffles acting on binops, of the form:
///  %x = shuffle ...
///  %y = shuffle ...
///  %a = binop %x, %y
///  %b = binop %x, %y
///  shuffle %a, %b, selectmask
/// We may, especially if the shuffle is wider than legal, be able to convert
/// the shuffle to a form where only parts of a and b need to be computed. On
/// architectures with no obvious "select" shuffle, this can reduce the total
/// number of operations if the target reports them as cheaper.
bool VectorCombine::foldSelectShuffle(Instruction &I, bool FromReduction) {
  auto *SVI = dyn_cast<ShuffleVectorInst>(&I);
  auto *VT = dyn_cast<FixedVectorType>(I.getType());
  if (!SVI || !VT)
    return false;
  auto *Op0 = dyn_cast<Instruction>(SVI->getOperand(0));
  auto *Op1 = dyn_cast<Instruction>(SVI->getOperand(1));
  if (!Op0 || !Op1 || Op0 == Op1 || !Op0->isBinaryOp() || !Op1->isBinaryOp() ||
      VT != Op0->getType())
    return false;
  auto *SVI0A = dyn_cast<Instruction>(Op0->getOperand(0));
  auto *SVI0B = dyn_cast<Instruction>(Op0->getOperand(1));
  auto *SVI1A = dyn_cast<Instruction>(Op1->getOperand(0));
  auto *SVI1B = dyn_cast<Instruction>(Op1->getOperand(1));
  SmallPtrSet<Instruction *, 4> InputShuffles({SVI0A, SVI0B, SVI1A, SVI1B});
  auto checkSVNonOpUses = [&](Instruction *I) {
    if (!I || I->getOperand(0)->getType() != VT)
      return true;
    return any_of(I->users(), [&](User *U) {
      return U != Op0 && U != Op1 &&
             !(isa<ShuffleVectorInst>(U) &&
               (InputShuffles.contains(cast<Instruction>(U)) ||
                isInstructionTriviallyDead(cast<Instruction>(U))));
    });
  };
  if (checkSVNonOpUses(SVI0A) || checkSVNonOpUses(SVI0B) ||
      checkSVNonOpUses(SVI1A) || checkSVNonOpUses(SVI1B))
    return false;

  // Collect all the uses that are shuffles that we can transform together. We
  // may not have a single shuffle, but a group that can all be transformed
  // together profitably.
  SmallVector<ShuffleVectorInst *> Shuffles;
  auto collectShuffles = [&](Instruction *I) {
    for (auto *U : I->users()) {
      auto *SV = dyn_cast<ShuffleVectorInst>(U);
      if (!SV || SV->getType() != VT)
        return false;
      if ((SV->getOperand(0) != Op0 && SV->getOperand(0) != Op1) ||
          (SV->getOperand(1) != Op0 && SV->getOperand(1) != Op1))
        return false;
      if (!llvm::is_contained(Shuffles, SV))
        Shuffles.push_back(SV);
    }
    return true;
  };
  if (!collectShuffles(Op0) || !collectShuffles(Op1))
    return false;
  // From a reduction, we need to be processing a single shuffle, otherwise the
  // other uses will not be lane-invariant.
  if (FromReduction && Shuffles.size() > 1)
    return false;

  // Add any shuffle uses for the shuffles we have found, to include them in our
  // cost calculations.
  if (!FromReduction) {
    for (ShuffleVectorInst *SV : Shuffles) {
      for (auto U : SV->users()) {
        ShuffleVectorInst *SSV = dyn_cast<ShuffleVectorInst>(U);
        if (SSV && isa<UndefValue>(SSV->getOperand(1)) && SSV->getType() == VT)
          Shuffles.push_back(SSV);
      }
    }
  }

  // For each of the output shuffles, we try to sort all the first vector
  // elements to the beginning, followed by the second vector elements at the
  // end. If the binops are legalized to smaller vectors, this may reduce total
  // number of binops. We compute the ReconstructMask mask needed to convert
  // back to the original lane order.
  SmallVector<std::pair<int, int>> V1, V2;
  SmallVector<SmallVector<int>> OrigReconstructMasks;
  int MaxV1Elt = 0, MaxV2Elt = 0;
  unsigned NumElts = VT->getNumElements();
  for (ShuffleVectorInst *SVN : Shuffles) {
    SmallVector<int> Mask;
    SVN->getShuffleMask(Mask);

    // Check the operands are the same as the original, or reversed (in which
    // case we need to commute the mask).
    Value *SVOp0 = SVN->getOperand(0);
    Value *SVOp1 = SVN->getOperand(1);
    if (isa<UndefValue>(SVOp1)) {
      auto *SSV = cast<ShuffleVectorInst>(SVOp0);
      SVOp0 = SSV->getOperand(0);
      SVOp1 = SSV->getOperand(1);
      for (unsigned I = 0, E = Mask.size(); I != E; I++) {
        if (Mask[I] >= static_cast<int>(SSV->getShuffleMask().size()))
          return false;
        Mask[I] = Mask[I] < 0 ? Mask[I] : SSV->getMaskValue(Mask[I]);
      }
    }
    if (SVOp0 == Op1 && SVOp1 == Op0) {
      std::swap(SVOp0, SVOp1);
      ShuffleVectorInst::commuteShuffleMask(Mask, NumElts);
    }
    if (SVOp0 != Op0 || SVOp1 != Op1)
      return false;

    // Calculate the reconstruction mask for this shuffle, as the mask needed
    // to take the packed values from Op0/Op1 and reconstruct the original
    // lane order.
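    // For example (illustrative), with NumElts = 4 and Mask = <2, 6, 1, 7>:
    // lanes 2 and 1 of Op0 are packed into V1 and lanes 2 and 3 of Op1 are
    // packed into V2, giving ReconstructMask = <0, 4, 1, 5>.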
    SmallVector<int> ReconstructMask;
    for (unsigned I = 0; I < Mask.size(); I++) {
      if (Mask[I] < 0) {
        ReconstructMask.push_back(-1);
      } else if (Mask[I] < static_cast<int>(NumElts)) {
        MaxV1Elt = std::max(MaxV1Elt, Mask[I]);
        auto It = find_if(V1, [&](const std::pair<int, int> &A) {
          return Mask[I] == A.first;
        });
        if (It != V1.end())
          ReconstructMask.push_back(It - V1.begin());
        else {
          ReconstructMask.push_back(V1.size());
          V1.emplace_back(Mask[I], V1.size());
        }
      } else {
        MaxV2Elt = std::max<int>(MaxV2Elt, Mask[I] - NumElts);
        auto It = find_if(V2, [&](const std::pair<int, int> &A) {
          return Mask[I] - static_cast<int>(NumElts) == A.first;
        });
        if (It != V2.end())
          ReconstructMask.push_back(NumElts + It - V2.begin());
        else {
          ReconstructMask.push_back(NumElts + V2.size());
          V2.emplace_back(Mask[I] - NumElts, NumElts + V2.size());
        }
      }
    }

    // For reductions, we know that the lane ordering out doesn't alter the
    // result. In-order can help simplify the shuffle away.
    if (FromReduction)
      sort(ReconstructMask);
    OrigReconstructMasks.push_back(std::move(ReconstructMask));
  }

  // If the maximum elements used from V1 and V2 are not larger than the new
  // vectors, the vectors are already packed and performing the optimization
  // again will likely not help any further. This also prevents us from getting
  // stuck in a cycle in case the costs do not also rule it out.
  if (V1.empty() || V2.empty() ||
      (MaxV1Elt == static_cast<int>(V1.size()) - 1 &&
       MaxV2Elt == static_cast<int>(V2.size()) - 1))
    return false;

  // GetBaseMaskValue takes one of the inputs, which may either be a shuffle, a
  // shuffle of another shuffle, or not a shuffle (that is treated like an
  // identity shuffle).
  auto GetBaseMaskValue = [&](Instruction *I, int M) {
    auto *SV = dyn_cast<ShuffleVectorInst>(I);
    if (!SV)
      return M;
    if (isa<UndefValue>(SV->getOperand(1)))
      if (auto *SSV = dyn_cast<ShuffleVectorInst>(SV->getOperand(0)))
        if (InputShuffles.contains(SSV))
          return SSV->getMaskValue(SV->getMaskValue(M));
    return SV->getMaskValue(M);
  };

  // Attempt to sort the inputs by ascending mask values to make simpler input
  // shuffles and push complex shuffles down to the uses. We sort on the first
  // of the two input shuffle orders, to try and get at least one input into a
  // nice order.
  auto SortBase = [&](Instruction *A, std::pair<int, int> X,
                      std::pair<int, int> Y) {
    int MXA = GetBaseMaskValue(A, X.first);
    int MYA = GetBaseMaskValue(A, Y.first);
    return MXA < MYA;
  };
  stable_sort(V1, [&](std::pair<int, int> A, std::pair<int, int> B) {
    return SortBase(SVI0A, A, B);
  });
  stable_sort(V2, [&](std::pair<int, int> A, std::pair<int, int> B) {
    return SortBase(SVI1A, A, B);
  });
  // Calculate our ReconstructMasks from the OrigReconstructMasks and the
  // modified order of the input shuffles.
  SmallVector<SmallVector<int>> ReconstructMasks;
  for (const auto &Mask : OrigReconstructMasks) {
    SmallVector<int> ReconstructMask;
    for (int M : Mask) {
      auto FindIndex = [](const SmallVector<std::pair<int, int>> &V, int M) {
        auto It = find_if(V, [M](auto A) { return A.second == M; });
        assert(It != V.end() && "Expected all entries in Mask");
        return std::distance(V.begin(), It);
      };
      if (M < 0)
        ReconstructMask.push_back(-1);
      else if (M < static_cast<int>(NumElts)) {
        ReconstructMask.push_back(FindIndex(V1, M));
      } else {
        ReconstructMask.push_back(NumElts + FindIndex(V2, M));
      }
    }
    ReconstructMasks.push_back(std::move(ReconstructMask));
  }

  // Calculate the masks needed for the new input shuffles, which get padded
  // with undef.
  SmallVector<int> V1A, V1B, V2A, V2B;
  for (unsigned I = 0; I < V1.size(); I++) {
    V1A.push_back(GetBaseMaskValue(SVI0A, V1[I].first));
    V1B.push_back(GetBaseMaskValue(SVI0B, V1[I].first));
  }
  for (unsigned I = 0; I < V2.size(); I++) {
    V2A.push_back(GetBaseMaskValue(SVI1A, V2[I].first));
    V2B.push_back(GetBaseMaskValue(SVI1B, V2[I].first));
  }
  while (V1A.size() < NumElts) {
    V1A.push_back(UndefMaskElem);
    V1B.push_back(UndefMaskElem);
  }
  while (V2A.size() < NumElts) {
    V2A.push_back(UndefMaskElem);
    V2B.push_back(UndefMaskElem);
  }

  auto AddShuffleCost = [&](InstructionCost C, Instruction *I) {
    auto *SV = dyn_cast<ShuffleVectorInst>(I);
    if (!SV)
      return C;
    return C + TTI.getShuffleCost(isa<UndefValue>(SV->getOperand(1))
                                      ? TTI::SK_PermuteSingleSrc
                                      : TTI::SK_PermuteTwoSrc,
                                  VT, SV->getShuffleMask());
  };
  auto AddShuffleMaskCost = [&](InstructionCost C, ArrayRef<int> Mask) {
    return C + TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, VT, Mask);
  };

  // Get the costs of the shuffles + binops before and after with the new
  // shuffle masks.
  InstructionCost CostBefore =
      TTI.getArithmeticInstrCost(Op0->getOpcode(), VT) +
      TTI.getArithmeticInstrCost(Op1->getOpcode(), VT);
  CostBefore += std::accumulate(Shuffles.begin(), Shuffles.end(),
                                InstructionCost(0), AddShuffleCost);
  CostBefore += std::accumulate(InputShuffles.begin(), InputShuffles.end(),
                                InstructionCost(0), AddShuffleCost);

  // The new binops will be unused for lanes past the used shuffle lengths.
  // These types attempt to get the correct cost for that from the target.
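  // E.g. (hypothetical): with VT == <4 x i32> and V1.size() == V2.size() == 2,
  // CostAfter models each binop on a <2 x i32> type, even though the IR
  // emitted below still uses full-width vectors padded with undef lanes.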
  FixedVectorType *Op0SmallVT =
      FixedVectorType::get(VT->getScalarType(), V1.size());
  FixedVectorType *Op1SmallVT =
      FixedVectorType::get(VT->getScalarType(), V2.size());
  InstructionCost CostAfter =
      TTI.getArithmeticInstrCost(Op0->getOpcode(), Op0SmallVT) +
      TTI.getArithmeticInstrCost(Op1->getOpcode(), Op1SmallVT);
  CostAfter += std::accumulate(ReconstructMasks.begin(), ReconstructMasks.end(),
                               InstructionCost(0), AddShuffleMaskCost);
  std::set<SmallVector<int>> OutputShuffleMasks({V1A, V1B, V2A, V2B});
  CostAfter +=
      std::accumulate(OutputShuffleMasks.begin(), OutputShuffleMasks.end(),
                      InstructionCost(0), AddShuffleMaskCost);

  LLVM_DEBUG(dbgs() << "Found a binop select shuffle pattern: " << I << "\n");
  LLVM_DEBUG(dbgs() << " CostBefore: " << CostBefore
                    << " vs CostAfter: " << CostAfter << "\n");
  if (CostBefore <= CostAfter)
    return false;

  // The cost model has passed; create the new instructions.
  auto GetShuffleOperand = [&](Instruction *I, unsigned Op) -> Value * {
    auto *SV = dyn_cast<ShuffleVectorInst>(I);
    if (!SV)
      return I;
    if (isa<UndefValue>(SV->getOperand(1)))
      if (auto *SSV = dyn_cast<ShuffleVectorInst>(SV->getOperand(0)))
        if (InputShuffles.contains(SSV))
          return SSV->getOperand(Op);
    return SV->getOperand(Op);
  };
  Builder.SetInsertPoint(SVI0A->getNextNode());
  Value *NSV0A = Builder.CreateShuffleVector(GetShuffleOperand(SVI0A, 0),
                                             GetShuffleOperand(SVI0A, 1), V1A);
  Builder.SetInsertPoint(SVI0B->getNextNode());
  Value *NSV0B = Builder.CreateShuffleVector(GetShuffleOperand(SVI0B, 0),
                                             GetShuffleOperand(SVI0B, 1), V1B);
  Builder.SetInsertPoint(SVI1A->getNextNode());
  Value *NSV1A = Builder.CreateShuffleVector(GetShuffleOperand(SVI1A, 0),
                                             GetShuffleOperand(SVI1A, 1), V2A);
  Builder.SetInsertPoint(SVI1B->getNextNode());
  Value *NSV1B = Builder.CreateShuffleVector(GetShuffleOperand(SVI1B, 0),
                                             GetShuffleOperand(SVI1B, 1), V2B);
  Builder.SetInsertPoint(Op0);
  Value *NOp0 = Builder.CreateBinOp((Instruction::BinaryOps)Op0->getOpcode(),
                                    NSV0A, NSV0B);
  if (auto *I = dyn_cast<Instruction>(NOp0))
    I->copyIRFlags(Op0, true);
  Builder.SetInsertPoint(Op1);
  Value *NOp1 = Builder.CreateBinOp((Instruction::BinaryOps)Op1->getOpcode(),
                                    NSV1A, NSV1B);
  if (auto *I = dyn_cast<Instruction>(NOp1))
    I->copyIRFlags(Op1, true);

  for (int S = 0, E = ReconstructMasks.size(); S != E; S++) {
    Builder.SetInsertPoint(Shuffles[S]);
    Value *NSV = Builder.CreateShuffleVector(NOp0, NOp1, ReconstructMasks[S]);
    replaceValue(*Shuffles[S], *NSV);
  }

  Worklist.pushValue(NSV0A);
  Worklist.pushValue(NSV0B);
  Worklist.pushValue(NSV1A);
  Worklist.pushValue(NSV1B);
  for (auto *S : Shuffles)
    Worklist.add(S);
  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  // Don't attempt vectorization if the target does not support vectors.
  if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
    return false;

  bool MadeChange = false;
  auto FoldInst = [this, &MadeChange](Instruction &I) {
    Builder.SetInsertPoint(&I);
    if (!ScalarizationOnly) {
      MadeChange |= vectorizeLoadInsert(I);
      MadeChange |= foldExtractExtract(I);
      MadeChange |= foldBitcastShuf(I);
      MadeChange |= foldExtractedCmps(I);
      MadeChange |= foldShuffleOfBinops(I);
      MadeChange |= foldShuffleFromReductions(I);
      MadeChange |= foldSelectShuffle(I);
    }
    MadeChange |= scalarizeBinopOrCmp(I);
    MadeChange |= scalarizeLoadExtract(I);
    MadeChange |= foldSingleElementStore(I);
  };
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Use an early-increment range so that we can erase instructions in the
    // loop.
    for (Instruction &I : make_early_inc_range(BB)) {
      if (I.isDebugOrPseudoInst())
        continue;
      FoldInst(I);
    }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.removeOne();
    if (!I)
      continue;

    if (isInstructionTriviallyDead(I)) {
      eraseInstruction(*I);
      continue;
    }

    FoldInst(*I);
  }

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    VectorCombine Combiner(F, TTI, DT, AA, AC, false);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)
Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  AAResults &AA = FAM.getResult<AAManager>(F);
  VectorCombine Combiner(F, TTI, DT, AA, AC,
                         ScalarizationOnly);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}
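
// A minimal usage sketch (illustrative, not part of this file): the pass can
// be exercised from the command line with
//   opt -passes=vector-combine input.ll -S
// or scheduled programmatically in a new-PM pipeline, assuming the header
// declares a VectorCombinePass constructor that defaults ScalarizationOnly to
// false:
//
//   #include "llvm/Transforms/Vectorize/VectorCombine.h"
//   // ...
//   FunctionPassManager FPM;
//   FPM.addPass(VectorCombinePass());
//   // FPM is then run over each function via the usual PassBuilder-built
//   // module pipeline.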