//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

#define DEBUG_TYPE "vector-combine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumVecLoad, "Number of vector loads formed");
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

static cl::opt<unsigned> MaxInstrsToScan(
    "vector-combine-max-scan-instrs", cl::init(30), cl::Hidden,
    cl::desc("Max number of instructions to scan for vector combining."));

static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();

namespace {
class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT, AAResults &AA, AssumptionCache &AC)
      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC) {}

  bool run();

private:
  Function &F;
  IRBuilder<> Builder;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;
  AAResults &AA;
  AssumptionCache &AC;
  InstructionWorklist Worklist;

  bool vectorizeLoadInsert(Instruction &I);
  ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
                                        ExtractElementInst *Ext1,
                                        unsigned PreferredExtractIndex) const;
  bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                             const Instruction &I,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                     Instruction &I);
  void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                       Instruction &I);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
  bool foldExtractedCmps(Instruction &I);
  bool foldSingleElementStore(Instruction &I);
  bool scalarizeLoadExtract(Instruction &I);

  void replaceValue(Value &Old, Value &New) {
    Old.replaceAllUsesWith(&New);
    New.takeName(&Old);
    if (auto *NewI = dyn_cast<Instruction>(&New)) {
      Worklist.pushUsersToWorkList(*NewI);
      Worklist.pushValue(NewI);
    }
    Worklist.pushValue(&Old);
  }

  void eraseInstruction(Instruction &I) {
    for (Value *Op : I.operands())
      Worklist.pushValue(Op);
    Worklist.remove(&I);
    I.eraseFromParent();
  }
};
} // namespace

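// Widen a scalar load plus insertelement into a full vector load of the
// minimum vector register width when the cost model allows it. A minimal
// sketch of the transform, assuming a 128-bit minimum vector register and a
// dereferenceable wide load (value names are hypothetical):
//   %s = load float, float* %p, align 4
//   %r = insertelement <4 x float> undef, float %s, i32 0
// -->
//   %vp = bitcast float* %p to <4 x float>*
//   %r = load <4 x float>, <4 x float>* %vp, align 4
// plus a lane-0 shuffle that is assumed free when there is no offset.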
bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
  // Match insert into fixed vector of scalar value.
  // TODO: Handle non-zero insert index.
  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
  Value *Scalar;
  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
      !Scalar->hasOneUse())
    return false;

  // Optionally match an extract from another vector.
  Value *X;
  bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
  if (!HasExtract)
    X = Scalar;

  // Match source value as load of scalar or vector.
  // Do not vectorize scalar load (widening) if atomic/volatile or under
  // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
  // or create data races non-existent in the source.
  auto *Load = dyn_cast<LoadInst>(X);
  if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
      Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
      mustSuppressSpeculation(*Load))
    return false;

  const DataLayout &DL = I.getModule()->getDataLayout();
  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

  // If original AS != Load's AS, we can't bitcast the original pointer and have
  // to use Load's operand instead. Ideally we would want to strip pointer casts
  // without changing AS, but there's no API to do that ATM.
  unsigned AS = Load->getPointerAddressSpace();
  if (AS != SrcPtr->getType()->getPointerAddressSpace())
    SrcPtr = Load->getPointerOperand();

  // We are potentially transforming byte-sized (8-bit) memory accesses, so make
  // sure we have all of our type-based constraints in place for this target.
  Type *ScalarTy = Scalar->getType();
  uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
      ScalarSize % 8 != 0)
    return false;

  // Check safety of replacing the scalar load with a larger vector load.
  // We use minimal alignment (maximum flexibility) because we only care about
  // the dereferenceable region. When calculating cost and creating a new op,
  // we may use a larger value based on alignment attributes.
  unsigned MinVecNumElts = MinVectorSize / ScalarSize;
  auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
  unsigned OffsetEltIndex = 0;
  Align Alignment = Load->getAlign();
  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
    // It is not safe to load directly from the pointer, but we can still peek
    // through gep offsets and check if it is safe to load from a base address
    // with updated alignment. If it is, we can shuffle the element(s) into
    // place after loading.
    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
    APInt Offset(OffsetBitWidth, 0);
    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    // We want to shuffle the result down from a high element of a vector, so
    // the offset must be positive.
    if (Offset.isNegative())
      return false;

    // The offset must be a multiple of the scalar element to shuffle cleanly
    // in the element's size.
    uint64_t ScalarSizeInBytes = ScalarSize / 8;
    if (Offset.urem(ScalarSizeInBytes) != 0)
      return false;

    // If we load MinVecNumElts, will our target element still be loaded?
    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
    if (OffsetEltIndex >= MinVecNumElts)
      return false;

    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
      return false;

    // Update alignment with offset value. Note that the offset could be negated
    // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
    // negation does not change the result of the alignment calculation.
    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
  }

  // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
  // Use the greater of the alignment on the load or its source pointer.
  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
  Type *LoadTy = Load->getType();
  InstructionCost OldCost =
      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
  APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
  OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                          /* Insert */ true, HasExtract);

  // New pattern: load VecPtr
  InstructionCost NewCost =
      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
  // Optionally, we are shuffling the loaded vector element(s) into place.
  // For the mask set everything but element 0 to undef to prevent poison from
  // propagating from the extra loaded memory. This will also optionally
  // shrink/grow the vector from the loaded size to the output size.
  // We assume this operation has no cost in codegen if there was no offset.
  // Note that we could use freeze to avoid poison problems, but then we might
  // still need a shuffle to change the vector size.
  unsigned OutputNumElts = Ty->getNumElements();
  SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
  Mask[0] = OffsetEltIndex;
  if (OffsetEltIndex)
    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy, Mask);

  // We can aggressively convert to the vector form because the backend can
  // invert this transform if it does not result in a performance win.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // It is safe and potentially profitable to load a vector directly:
  // inselt undef, load Scalar, 0 --> load VecPtr
  IRBuilder<> Builder(Load);
  Value *CastedPtr = Builder.CreateBitCast(SrcPtr, MinVecTy->getPointerTo(AS));
  Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
  VecLd = Builder.CreateShuffleVector(VecLd, Mask);

  replaceValue(I, *VecLd);
  ++NumVecLoad;
  return true;
}

/// Determine which, if any, of the inputs should be replaced by a shuffle
/// followed by extract from a different index.
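/// For example (illustrative): with extracts at indexes 0 and 3, equal
/// per-index extract costs, and no preferred index, the extract at the higher
/// index (3) is chosen to be replaced by a shuffle down to index 0.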
ExtractElementInst *VectorCombine::getShuffleExtract(
    ExtractElementInst *Ext0, ExtractElementInst *Ext1,
    unsigned PreferredExtractIndex = InvalidIndex) const {
  assert(isa<ConstantInt>(Ext0->getIndexOperand()) &&
         isa<ConstantInt>(Ext1->getIndexOperand()) &&
         "Expected constant extract indexes");

  unsigned Index0 = cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue();
  unsigned Index1 = cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue();

  // If the extract indexes are identical, no shuffle is needed.
  if (Index0 == Index1)
    return nullptr;

  Type *VecTy = Ext0->getVectorOperand()->getType();
  assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
  InstructionCost Cost0 =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  InstructionCost Cost1 =
      TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);

  // If both costs are invalid, no shuffle is needed.
  if (!Cost0.isValid() && !Cost1.isValid())
    return nullptr;

  // We are extracting from 2 different indexes, so one operand must be shuffled
  // before performing a vector operation and/or extract. The more expensive
  // extract will be replaced by a shuffle.
  if (Cost0 > Cost1)
    return Ext0;
  if (Cost1 > Cost0)
    return Ext1;

  // If the costs are equal and there is a preferred extract index, shuffle the
  // opposite operand.
  if (PreferredExtractIndex == Index0)
    return Ext1;
  if (PreferredExtractIndex == Index1)
    return Ext0;

  // Otherwise, replace the extract with the higher index.
  return Index0 > Index1 ? Ext0 : Ext1;
}

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
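/// A minimal sketch of the two candidate forms being costed, with hypothetical
/// value names:
///   %e0 = extractelement <4 x i32> %x, i32 0
///   %e1 = extractelement <4 x i32> %y, i32 0
///   %r  = add i32 %e0, %e1                    ; scalar form: 2 extracts + op
/// vs.
///   %v = add <4 x i32> %x, %y
///   %r = extractelement <4 x i32> %v, i32 0   ; vector form: op + 1 extract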
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          const Instruction &I,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
         isa<ConstantInt>(Ext1->getOperand(1)) &&
         "Expected constant extract indexes");
  unsigned Opcode = I.getOpcode();
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  InstructionCost ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
  unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();

  InstructionCost Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  InstructionCost Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  // check the cost of creating a broadcast shuffle and shuffling both
  // operands to element 0.
  InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  InstructionCost OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
  if (ConvertToShuffle) {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    // (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Create a shuffle that translates (shifts) 1 element from the input vector
/// to a new element location.
static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
                                 unsigned NewIndex, IRBuilder<> &Builder) {
  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(Vec->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[NewIndex] = OldIndex;
  return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not creating
/// unnecessary instructions.
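/// For example, translating an extract at index 2 down to index 0 emits
/// (illustrative, hypothetical value names):
///   %shift = shufflevector <4 x i32> %x, <4 x i32> undef,
///                          <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
///   %ext = extractelement <4 x i32> %shift, i32 0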
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex,
                                            IRBuilder<> &Builder) {
  // If the extract can be constant-folded, this code is unsimplified. Defer
  // to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  if (isa<Constant>(X))
    return nullptr;

  Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
                                   NewIndex, Builder);
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
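/// Illustrative IR for matching extract indexes (hypothetical value names):
///   %e0 = extractelement <4 x i32> %a, i32 1
///   %e1 = extractelement <4 x i32> %b, i32 1
///   %c  = icmp sgt i32 %e0, %e1
/// -->
///   %vc = icmp sgt <4 x i32> %a, %b
///   %c  = extractelement <4 x i1> %vc, i32 1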
void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
                                  ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
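/// Illustrative IR, analogous to the compare fold above (hypothetical names):
///   %e0 = extractelement <4 x float> %a, i32 2
///   %e1 = extractelement <4 x float> %b, i32 2
///   %s  = fadd float %e0, %e1
/// -->
///   %v = fadd <4 x float> %a, %b
///   %s = extractelement <4 x float> %v, i32 2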
void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
                                    ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Match an instruction with extracted vector operands.
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  // probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = InvalidIndex;
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I, ExtractToChange, InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx, Builder);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  Worklist.push(Ext0);
  Worklist.push(Ext1);
  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
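/// A sketch of the wide-to-narrow case, where each wide mask element expands
/// by the scale factor (here 2, for <4 x i32> -> <8 x i16>; names are
/// hypothetical):
///   bitcast (shuffle <4 x i32> %v, undef, <1, 0, 3, 2>) to <8 x i16>
/// -->
///   shuffle (bitcast <4 x i32> %v to <8 x i16>), undef,
///           <2, 3, 0, 1, 6, 7, 4, 5>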
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // 1) Do not fold bitcast shuffle for scalable type. First, shuffle cost for
  // scalable type is unknown; Second, we cannot reason if the narrowed shuffle
  // mask for scalable type is a splat or not.
  // 2) Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
  auto *SrcTy = dyn_cast<FixedVectorType>(V->getType());
  if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  InstructionCost DestCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, DestTy, NewMask);
  InstructionCost SrcCost =
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy, Mask);
  if (DestCost > SrcCost || !DestCost.isValid())
    return false;

  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask);
  replaceValue(I, *Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
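/// Worked example (illustrative): with one inserted scalar and one constant
/// operand, the vector add becomes a scalar add into a pre-folded constant
/// vector:
///   %i = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 %x, i32 0
///   %r = add <4 x i32> %i, <i32 5, i32 6, i32 7, i32 8>
/// -->
///   %s = add i32 %x, 5
///   %r = insertelement <4 x i32> <i32 6, i32 8, i32 10, i32 12>, i32 %s, i32 0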
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail for single insertion if it is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  InstructionCost ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  InstructionCost InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  InstructionCost OldCost =
      (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) + VectorOpCost;
  InstructionCost NewCost = ScalarOpCost + InsertCost +
                            (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                            (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element, this should constant fold.
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
                            : ConstantExpr::get(Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  replaceValue(I, *Insert);
  return true;
}

/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
/// a vector into vector operations followed by extract. Note: The SLP pass
/// may miss this pattern because of implementation problems.
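/// Sketch of the pattern (hypothetical values, assuming equal extract costs):
///   %e0 = extractelement <4 x i32> %x, i32 0
///   %e1 = extractelement <4 x i32> %x, i32 3
///   %c0 = icmp sgt i32 %e0, 42
///   %c1 = icmp sgt i32 %e1, 99
///   %r  = and i1 %c0, %c1
/// The fold emits one vector compare against <42, undef, undef, 99>, a
/// shuffle that moves lane 3 down to lane 0, a vector 'and', and a single
/// extract of lane 0.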
bool VectorCombine::foldExtractedCmps(Instruction &I) {
  // We are looking for a scalar binop of booleans.
  // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
  if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
    return false;

  // The compare predicates should match, and each compare should have a
  // constant operand.
  // TODO: Relax the one-use constraints.
  Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
  Instruction *I0, *I1;
  Constant *C0, *C1;
  CmpInst::Predicate P0, P1;
  if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
      !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
      P0 != P1)
    return false;

  // The compare operands must be extracts of the same vector with constant
  // extract indexes.
  // TODO: Relax the one-use constraints.
  Value *X;
  uint64_t Index0, Index1;
  if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
      !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
    return false;

  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
  if (!ConvertToShuf)
    return false;

  // The original scalar pattern is:
  // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
  CmpInst::Predicate Pred = P0;
  unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
                                                    : Instruction::ICmp;
  auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
  if (!VecTy)
    return false;

  InstructionCost OldCost =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
  OldCost +=
      TTI.getCmpSelInstrCost(CmpOpcode, I0->getType(),
                             CmpInst::makeCmpResultType(I0->getType()), Pred) *
      2;
  OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());

  // The proposed vector pattern is:
  // vcmp = cmp Pred X, VecC
  // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
  int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
  int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
  auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
  InstructionCost NewCost = TTI.getCmpSelInstrCost(
      CmpOpcode, X->getType(), CmpInst::makeCmpResultType(X->getType()), Pred);
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[CheapIndex] = ExpensiveIndex;
  NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy,
                                ShufMask);
  NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
  NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);

  // Aggressively form vector ops if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // Create a vector constant from the 2 scalar constants.
  SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
                                   UndefValue::get(VecTy->getElementType()));
  CmpC[Index0] = C0;
  CmpC[Index1] = C1;
  Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));

  Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
  Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                        VCmp, Shuf);
  Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
  replaceValue(I, *NewExt);
  ++NumVecCmpBO;
  return true;
}

// Check if the memory location is modified between two instructions in the
// same basic block.
static bool isMemModifiedBetween(BasicBlock::iterator Begin,
                                 BasicBlock::iterator End,
                                 const MemoryLocation &Loc, AAResults &AA) {
  unsigned NumScanned = 0;
  return std::any_of(Begin, End, [&](const Instruction &Instr) {
    return isModSet(AA.getModRefInfo(&Instr, Loc)) ||
           ++NumScanned > MaxInstrsToScan;
  });
}

/// Helper class to indicate whether a vector index can be safely scalarized and
/// if a freeze needs to be inserted.
class ScalarizationResult {
  enum class StatusTy { Unsafe, Safe, SafeWithFreeze };

  StatusTy Status;
  Value *ToFreeze;

  ScalarizationResult(StatusTy Status, Value *ToFreeze = nullptr)
      : Status(Status), ToFreeze(ToFreeze) {}

public:
  ScalarizationResult(const ScalarizationResult &Other) = default;
  ~ScalarizationResult() {
    assert(!ToFreeze && "freeze() not called with ToFreeze being set");
  }

  static ScalarizationResult unsafe() { return {StatusTy::Unsafe}; }
  static ScalarizationResult safe() { return {StatusTy::Safe}; }
  static ScalarizationResult safeWithFreeze(Value *ToFreeze) {
    return {StatusTy::SafeWithFreeze, ToFreeze};
  }
  /// Returns true if the index can be scalarized without requiring a freeze.
  bool isSafe() const { return Status == StatusTy::Safe; }
  /// Returns true if the index cannot be scalarized.
  bool isUnsafe() const { return Status == StatusTy::Unsafe; }
  /// Returns true if the index can be scalarized, but requires inserting a
  /// freeze.
  bool isSafeWithFreeze() const { return Status == StatusTy::SafeWithFreeze; }

  /// Reset the state to Unsafe and clear ToFreeze if set.
  void discard() {
    ToFreeze = nullptr;
    Status = StatusTy::Unsafe;
  }

  /// Freeze the ToFreeze value and update the use in \p UserI to use it.
  void freeze(IRBuilder<> &Builder, Instruction &UserI) {
    assert(isSafeWithFreeze() &&
           "should only be used when freezing is required");
    assert(is_contained(ToFreeze->users(), &UserI) &&
           "UserI must be a user of ToFreeze");
    IRBuilder<>::InsertPointGuard Guard(Builder);
    Builder.SetInsertPoint(cast<Instruction>(&UserI));
    Value *Frozen =
        Builder.CreateFreeze(ToFreeze, ToFreeze->getName() + ".frozen");
    for (Use &U : make_early_inc_range(UserI.operands()))
      if (U.get() == ToFreeze)
        U.set(Frozen);

    ToFreeze = nullptr;
  }
};

/// Check if it is legal to scalarize a memory access to \p VecTy at index \p
/// Idx. \p Idx must access a valid vector element.
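/// For example, a variable index of the form 'and %i, 3' into a <4 x i32>
/// vector is always in range once %i is frozen, so it is reported as
/// safe-with-freeze rather than unsafe.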
static ScalarizationResult canScalarizeAccess(FixedVectorType *VecTy,
                                              Value *Idx, Instruction *CtxI,
                                              AssumptionCache &AC,
                                              const DominatorTree &DT) {
  if (auto *C = dyn_cast<ConstantInt>(Idx)) {
    if (C->getValue().ult(VecTy->getNumElements()))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  unsigned IntWidth = Idx->getType()->getScalarSizeInBits();
  APInt Zero(IntWidth, 0);
  APInt MaxElts(IntWidth, VecTy->getNumElements());
  ConstantRange ValidIndices(Zero, MaxElts);
  ConstantRange IdxRange(IntWidth, true);

  if (isGuaranteedNotToBePoison(Idx, &AC)) {
    if (ValidIndices.contains(computeConstantRange(Idx, true, &AC, CtxI, &DT)))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  // If the index may be poison, check if we can insert a freeze before the
  // range of the index is restricted.
  Value *IdxBase;
  ConstantInt *CI;
  if (match(Idx, m_And(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.binaryAnd(CI->getValue());
  } else if (match(Idx, m_URem(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.urem(CI->getValue());
  }

  if (ValidIndices.contains(IdxRange))
    return ScalarizationResult::safeWithFreeze(IdxBase);
  return ScalarizationResult::unsafe();
}

/// The memory operation on a vector of \p ScalarType had alignment of
/// \p VectorAlignment. Compute the maximal, but conservatively correct,
/// alignment that will be valid for the memory operation on a single scalar
/// element of the same type with index \p Idx.
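/// For example (illustrative): accessing element 1 of a <4 x i32> vector that
/// was aligned to 8 bytes yields commonAlignment(8, 1 * 4) = 4 for the scalar
/// access.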
static Align computeAlignmentAfterScalarization(Align VectorAlignment,
                                                Type *ScalarType, Value *Idx,
                                                const DataLayout &DL) {
  if (auto *C = dyn_cast<ConstantInt>(Idx))
    return commonAlignment(VectorAlignment,
                           C->getZExtValue() * DL.getTypeStoreSize(ScalarType));
  return commonAlignment(VectorAlignment, DL.getTypeStoreSize(ScalarType));
}

// Combine patterns like:
//   %0 = load <4 x i32>, <4 x i32>* %a
//   %1 = insertelement <4 x i32> %0, i32 %b, i32 1
//   store <4 x i32> %1, <4 x i32>* %a
// to:
//   %0 = bitcast <4 x i32>* %a to i32*
//   %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
//   store i32 %b, i32* %1
bool VectorCombine::foldSingleElementStore(Instruction &I) {
  StoreInst *SI = dyn_cast<StoreInst>(&I);
  if (!SI || !SI->isSimple() ||
      !isa<FixedVectorType>(SI->getValueOperand()->getType()))
    return false;

  // TODO: Combine more complicated patterns (multiple insert) by referencing
  // TargetTransformInfo.
  Instruction *Source;
  Value *NewElement;
  Value *Idx;
  if (!match(SI->getValueOperand(),
             m_InsertElt(m_Instruction(Source), m_Value(NewElement),
                         m_Value(Idx))))
    return false;

  if (auto *Load = dyn_cast<LoadInst>(Source)) {
    auto VecTy = cast<FixedVectorType>(SI->getValueOperand()->getType());
    const DataLayout &DL = I.getModule()->getDataLayout();
    Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
    // Don't optimize for atomic/volatile load or store. Ensure memory is not
    // modified between, vector type matches store size, and index is inbounds.
    if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
        !DL.typeSizeEqualsStoreSize(Load->getType()) ||
        SrcAddr != SI->getPointerOperand()->stripPointerCasts())
      return false;

    auto ScalarizableIdx = canScalarizeAccess(VecTy, Idx, Load, AC, DT);
    if (ScalarizableIdx.isUnsafe() ||
        isMemModifiedBetween(Load->getIterator(), SI->getIterator(),
                             MemoryLocation::get(SI), AA))
      return false;

    if (ScalarizableIdx.isSafeWithFreeze())
      ScalarizableIdx.freeze(Builder, *cast<Instruction>(Idx));
    Value *GEP = Builder.CreateInBoundsGEP(
        SI->getValueOperand()->getType(), SI->getPointerOperand(),
        {ConstantInt::get(Idx->getType(), 0), Idx});
    StoreInst *NSI = Builder.CreateStore(NewElement, GEP);
    NSI->copyMetadata(*SI);
    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx,
        DL);
    NSI->setAlignment(ScalarOpAlignment);
    replaceValue(I, *NSI);
    eraseInstruction(I);
    return true;
  }

  return false;
}

/// Try to scalarize vector loads feeding extractelement instructions.
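/// Illustrative IR (hypothetical values), assuming the cost model favors the
/// scalar loads:
///   %v = load <4 x i32>, <4 x i32>* %p
///   %e = extractelement <4 x i32> %v, i32 2
/// -->
///   %gep = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 0, i32 2
///   %e = load i32, i32* %gep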
bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
  Value *Ptr;
  if (!match(&I, m_Load(m_Value(Ptr))))
    return false;

  auto *LI = cast<LoadInst>(&I);
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (LI->isVolatile() || !DL.typeSizeEqualsStoreSize(LI->getType()))
    return false;

  auto *FixedVT = dyn_cast<FixedVectorType>(LI->getType());
  if (!FixedVT)
    return false;

  InstructionCost OriginalCost = TTI.getMemoryOpCost(
      Instruction::Load, LI->getType(), Align(LI->getAlignment()),
      LI->getPointerAddressSpace());
  InstructionCost ScalarizedCost = 0;

  Instruction *LastCheckedInst = LI;
  unsigned NumInstChecked = 0;
  // Check if all users of the load are extracts with no memory modifications
  // between the load and the extract. Compute the cost of both the original
  // code and the scalarized version.
  for (User *U : LI->users()) {
    auto *UI = dyn_cast<ExtractElementInst>(U);
    if (!UI || UI->getParent() != LI->getParent())
      return false;

    if (!isGuaranteedNotToBePoison(UI->getOperand(1), &AC, LI, &DT))
      return false;

    // Check if any instruction between the load and the extract may modify
    // memory.
    if (LastCheckedInst->comesBefore(UI)) {
      for (Instruction &I :
           make_range(std::next(LI->getIterator()), UI->getIterator())) {
        // Bail out if we reached the check limit or the instruction may write
        // to memory.
        if (NumInstChecked == MaxInstrsToScan || I.mayWriteToMemory())
          return false;
        NumInstChecked++;
      }
    }

    if (!LastCheckedInst)
      LastCheckedInst = UI;
    else if (LastCheckedInst->comesBefore(UI))
      LastCheckedInst = UI;

    auto ScalarIdx = canScalarizeAccess(FixedVT, UI->getOperand(1), &I, AC, DT);
    if (!ScalarIdx.isSafe()) {
      // TODO: Freeze index if it is safe to do so.
      ScalarIdx.discard();
      return false;
    }

    auto *Index = dyn_cast<ConstantInt>(UI->getOperand(1));
    OriginalCost +=
        TTI.getVectorInstrCost(Instruction::ExtractElement, LI->getType(),
                               Index ? Index->getZExtValue() : -1);
    ScalarizedCost +=
        TTI.getMemoryOpCost(Instruction::Load, FixedVT->getElementType(),
                            Align(1), LI->getPointerAddressSpace());
    ScalarizedCost += TTI.getAddressComputationCost(FixedVT->getElementType());
  }

  if (ScalarizedCost >= OriginalCost)
    return false;

  // Replace extracts with narrow scalar loads.
  for (User *U : LI->users()) {
    auto *EI = cast<ExtractElementInst>(U);
    Builder.SetInsertPoint(EI);

    Value *Idx = EI->getOperand(1);
    Value *GEP =
        Builder.CreateInBoundsGEP(FixedVT, Ptr, {Builder.getInt32(0), Idx});
    auto *NewLoad = cast<LoadInst>(Builder.CreateLoad(
        FixedVT->getElementType(), GEP, EI->getName() + ".scalar"));

    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        LI->getAlign(), FixedVT->getElementType(), Idx, DL);
    NewLoad->setAlignment(ScalarOpAlignment);

    replaceValue(*EI, *NewLoad);
  }

  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  // Don't attempt vectorization if the target does not support vectors.
  if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
    return false;

  bool MadeChange = false;
  auto FoldInst = [this, &MadeChange](Instruction &I) {
    Builder.SetInsertPoint(&I);
    MadeChange |= vectorizeLoadInsert(I);
    MadeChange |= foldExtractExtract(I);
    MadeChange |= foldBitcastShuf(I);
    MadeChange |= scalarizeBinopOrCmp(I);
    MadeChange |= foldExtractedCmps(I);
    MadeChange |= scalarizeLoadExtract(I);
    MadeChange |= foldSingleElementStore(I);
  };
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Use early increment range so that we can erase instructions in loop.
    for (Instruction &I : make_early_inc_range(BB)) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      FoldInst(I);
    }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.removeOne();
    if (!I)
      continue;

    if (isInstructionTriviallyDead(I)) {
      eraseInstruction(*I);
      continue;
    }

    FoldInst(*I);
  }

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    VectorCombine Combiner(F, TTI, DT, AA, AC);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)
Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  AAResults &AA = FAM.getResult<AAManager>(F);
  VectorCombine Combiner(F, TTI, DT, AA, AC);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}