//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                  cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
    cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

static cl::opt<int>
MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
    cl::desc("Maximum depth of the lookup for consecutive stores."));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

// The look-ahead heuristic goes through the users of the bundle to calculate
// the users' cost in getExternalUsesCost(). To avoid a compile-time increase,
// we limit the number of users visited to this value.
static cl::opt<unsigned> LookAheadUsersBudget(
    "slp-look-ahead-users-budget", cl::init(2), cl::Hidden,
    cl::desc("The maximum number of users to visit while visiting the "
             "predecessors. This prevents compilation time increase."));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  //       we need to confirm that the caller code correctly handles Intrinsics
  //       for example (does not have 2 operands).
  return false;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size)) {
      Mask.push_back(UndefMaskElem);
      continue;
    }
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask.push_back(IntIdx);
    // We can extractelement from undef or poison vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}
/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of an unsupported opcode is SDIV, which can potentially cause UB if
/// the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState: the opcode with which we suppose the whole list
/// could be vectorized, even if its structure is diverse.
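/// For example, for the list {add, sub, add, sub} of BinaryOperators the main
/// opcode is 'add' and the alternate opcode is 'sub' (an alternate shuffle);
/// for a list mixing, say, an add and a load, no common opcode exists and the
/// returned state has a null MainOp (getOpcode() returns 0).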
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if the in-tree use of \p Scalar also requires an extract,
/// i.e. \p UserInst needs \p Scalar as a scalar operand even though the
/// value itself is vectorized.
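/// For example, the pointer operand of a scalar load or store user, or a
/// scalar argument of a vector intrinsic call, must stay scalar and therefore
/// has to be extracted from the vectorized value.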
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {

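/// Computes \p Mask as the inverse of the permutation \p Indices, so that
/// Mask[Indices[I]] == I for every I. For example, Indices = {2, 0, 1}
/// yields Mask = {1, 2, 0}.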
static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, E + 1);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns the insert index of an InsertElement or InsertValue instruction,
/// using \p Offset as the base offset for the index.
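/// For example, for
///   %1 = insertvalue {[2 x i32], [2 x i32]} %agg, i32 %v, 1, 0
/// with Offset 0, the flattened insert index is 2 (the third scalar of the
/// aggregate).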
static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return UndefMaskElem;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    if (isa<UndefValue>(IE->getOperand(2)))
      return UndefMaskElem;
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    //       data type rather than just register size. For example, x86 AVX has
    //       256-bit registers, but it does not support integer operations
    //       at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumOpsWantToKeepOrder.clear();
    NumOpsWantToKeepOriginalOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns The best order of instructions for vectorization.
  Optional<ArrayRef<unsigned>> bestOrder() const {
    assert(llvm::all_of(
               NumOpsWantToKeepOrder,
               [this](const decltype(NumOpsWantToKeepOrder)::value_type &D) {
                 return D.getFirst().size() ==
                        VectorizableTree[0]->Scalars.size();
               }) &&
           "All orders must have the same size as number of instructions in "
           "tree node.");
    auto I = std::max_element(
        NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
        [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
           const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
          return D1.second < D2.second;
        });
    if (I == NumOpsWantToKeepOrder.end() ||
        I->getSecond() <= NumOpsWantToKeepOriginalOrder)
      return None;

    return makeArrayRef(I->getFirst());
  }

  /// Builds the correct order for root instructions.
  /// If some leaves have the same instructions to be vectorized, we may
  /// incorrectly evaluate the best order for the root node (it is built for the
  /// vector of instructions without repeated instructions and, thus, has fewer
  /// elements than the root node). This function builds the correct order for
  /// the root node.
  /// For example, if the root node is \<a+b, a+c, a+d, f+e\>, then the leaves
  /// are \<a, a, a, f\> and \<b, c, d, e\>. When we try to vectorize the first
  /// leaf, it will be shrunk to \<a, b\>. If instructions in this leaf should
  /// be reordered, the best order will be \<1, 0\>. We need to extend this
  /// order for the root node. For the root node this order should look like
  /// \<3, 0, 1, 2\>. This function extends the order for the reused
  /// instructions.
  void findRootOrder(OrdersType &Order) {
    // If the leaf has the same number of instructions to vectorize as the root
    // - order must be set already.
    unsigned RootSize = VectorizableTree[0]->Scalars.size();
    if (Order.size() == RootSize)
      return;
    SmallVector<unsigned, 4> RealOrder(Order.size());
    std::swap(Order, RealOrder);
    SmallVector<int, 4> Mask;
    inversePermutation(RealOrder, Mask);
    Order.assign(Mask.begin(), Mask.end());
    // The leaf has fewer instructions than the root, so we need to find the
    // true order of the root.
    // Scan the nodes starting from the leaf back to the root.
    const TreeEntry *PNode = VectorizableTree.back().get();
    SmallVector<const TreeEntry *, 4> Nodes(1, PNode);
    SmallPtrSet<const TreeEntry *, 4> Visited;
    while (!Nodes.empty() && Order.size() != RootSize) {
      const TreeEntry *PNode = Nodes.pop_back_val();
      if (!Visited.insert(PNode).second)
        continue;
      const TreeEntry &Node = *PNode;
      for (const EdgeInfo &EI : Node.UserTreeIndices)
        if (EI.UserTE)
          Nodes.push_back(EI.UserTE);
      if (Node.ReuseShuffleIndices.empty())
        continue;
      // Build the order for the parent node.
      OrdersType NewOrder(Node.ReuseShuffleIndices.size(), RootSize);
      SmallVector<unsigned, 4> OrderCounter(Order.size(), 0);
      // The algorithm of the order extension is:
      // 1. Calculate the number of the same instructions for the order.
      // 2. Calculate the index of the new order: total number of instructions
      //    with order less than the order of the current instruction + reuse
      //    number of the current instruction.
      // 3. The new order is just the index of the instruction in the original
      //    vector of the instructions.
      for (unsigned I : Node.ReuseShuffleIndices)
        ++OrderCounter[Order[I]];
      SmallVector<unsigned, 4> CurrentCounter(Order.size(), 0);
      for (unsigned I = 0, E = Node.ReuseShuffleIndices.size(); I < E; ++I) {
        unsigned ReusedIdx = Node.ReuseShuffleIndices[I];
        unsigned OrderIdx = Order[ReusedIdx];
        unsigned NewIdx = 0;
        for (unsigned J = 0; J < OrderIdx; ++J)
          NewIdx += OrderCounter[J];
        NewIdx += CurrentCounter[OrderIdx];
        ++CurrentCounter[OrderIdx];
        assert(NewOrder[NewIdx] == RootSize &&
               "The order index should not be written already.");
        NewOrder[NewIdx] = I;
      }
      std::swap(Order, NewOrder);
    }
    assert(Order.size() == RootSize &&
           "Root node is expected or the size of the order must be the same as "
           "the number of elements in the root node.");
    assert(llvm::all_of(Order,
                        [RootSize](unsigned Val) { return Val != RootSize; }) &&
           "All indices must be initialized");
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO. It is set to 'true' if 'V' is attached to an inverse
      /// operation in the left-linearized form (e.g., Sub/Div), and 'false'
      /// otherwise (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at lane
    /// that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important. When computing
    // the scores of matching one sub-tree with another, we are basically
    // counting the number of values that are matching. So even if all scores
    // are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
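    /// For example, two loads from consecutive addresses score
    /// ScoreConsecutiveLoads, two identical non-load instructions score
    /// ScoreSplat, and a pair with no recognizable relation scores ScoreFail.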
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE) {
      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads
                                    : VLOperands::ScoreFail;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV;
      ConstantInt *Ex1Idx, *Ex2Idx;
      if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
          match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
          Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
        return VLOperands::ScoreConsecutiveExtracts;

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1 == I2)
          return VLOperands::ScoreSplat;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }

    /// Holds the values and their lane that are taking part in the look-ahead
    /// score calculation. This is used in the external uses cost calculation.
    SmallDenseMap<Value *, int> InLookAheadValues;

    /// \returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
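    /// For example, a user outside the vectorized tree adds ExternalUseCost,
    /// while a user that is inside the tree (or the look-ahead code) but in a
    /// different lane adds UserInDiffLaneCost, since it would need a shuffle.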
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        if (isa<Constant>(V)) {
          // Since this is a function pass, it doesn't make semantic sense to
          // walk the users of a subclass of Constant. The users could be in
          // another function, or even another module that happens to be in
          // the same LLVMContext.
          continue;
        }

        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        unsigned UsersBudget = LookAheadUsersBudget;
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            auto It = llvm::find(UserTE->Scalars, U);
            assert(It != UserTE->Scalars.end() && "U is in UserTE");
            int UserLn = std::distance(UserTE->Scalars.begin(), It);
            assert(UserLn >= 0 && "Bad lane");
            if (UserLn != Ln)
              Cost += UserInDiffLaneCost;
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (It2->second != Ln)
                Cost += UserInDiffLaneCost;
            } else {
              // The user is neither in SLP tree nor in the look-ahead code.
              Cost += ExternalUseCost;
            }
          }
          // Limit the number of visited uses to cap compilation time.
          if (--UsersBudget == 0)
            break;
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///     G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
                                       getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      // or if V1 and V2 are not instructions,
      // or if they are SPLAT,
      // or if they are not consecutive, early return the current cost.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1] = Lane1;
      InLookAheadValues[V2] = Lane2;

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all
      // possible operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair op1I with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match; the more they match, the higher the
    /// score. This helps break ties in an informed way when we cannot decide on
    /// the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
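    // For example, in ReorderingMode::Load the candidate whose address is
    // consecutive with the operand picked for the previous lane wins via the
    // look-ahead score, while in ReorderingMode::Splat only an operand
    // identical to the previous lane's is accepted.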
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
        case ReorderingMode::Constant:
        case ReorderingMode::Opcode: {
          bool LeftToRight = Lane > LastLane;
          Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
          Value *OpRight = (LeftToRight) ? Op : OpLastLane;
          unsigned Score =
              getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
          if (Score > BestOp.Score) {
            BestOp.Idx = Idx;
            BestOp.Score = Score;
          }
          break;
        }
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }

    /// Helper for reorderOperandVecs.
    /// \returns the lane that we should start reordering from. This is the one
    /// with the fewest operands that can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }

    /// \returns the maximum number of operands that are allowed to be reordered
    /// for \p Lane. This is used as a heuristic for selecting the first lane to
    /// start operand reordering.
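    /// For example, a commutative lane such as 'B + C' has both APOs false and
    /// returns 2, while 'B - C' has one operand of each APO and returns 1,
    /// which is why subtraction lanes make good starting points: their
    /// operands cannot be reordered.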
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to count
      // how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }

    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }

    /// \returns the number of operands.
    unsigned getNumOperands() const { return OpsVec.size(); }

    /// \returns the number of lanes.
    unsigned getNumLanes() const { return OpsVec[0].size(); }

    /// \returns the operand value at \p OpIdx and \p Lane.
    Value *getValue(unsigned OpIdx, unsigned Lane) const {
      return getData(OpIdx, Lane).V;
    }

    /// \returns true if the data structure is empty.
    bool empty() const { return OpsVec.empty(); }

    /// Clears the data.
    void clear() { OpsVec.clear(); }

1345 /// \Returns true if there are enough operands identical to \p Op to fill
1346 /// the whole vector.
1347 /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow.
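/// For example, for the bundle {add %x, %a; add %x, %b} the value %x has a
/// matching, unused operand in every other lane, so splatting %x into a
/// vector is viable and this returns true.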
1348 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1349 bool OpAPO = getData(OpIdx, Lane).APO;
1350 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1351 if (Ln == Lane)
1352 continue;
1353 // This is set to true if we found a candidate for broadcast at Lane.
1354 bool FoundCandidate = false;
1355 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1356 OperandData &Data = getData(OpI, Ln);
1357 if (Data.APO != OpAPO || Data.IsUsed)
1358 continue;
1359 if (Data.V == Op) {
1360 FoundCandidate = true;
1361 Data.IsUsed = true;
1362 break;
1363 }
1364 }
1365 if (!FoundCandidate)
1366 return false;
1367 }
1368 return true;
1369 }
1370
1371 public:
1372 /// Initialize with all the operands of the instruction vector \p RootVL.
1373 VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
1374 ScalarEvolution &SE, const BoUpSLP &R)
1375 : DL(DL), SE(SE), R(R) {
1376 // Append all the operands of RootVL.
1377 appendOperandsOfVL(RootVL);
1378 }
1379
1380 /// \returns a value vector with the operands across all lanes for the
1381 /// operand at \p OpIdx.
1382 ValueList getVL(unsigned OpIdx) const {
1383 ValueList OpVL(OpsVec[OpIdx].size());
1384 assert(OpsVec[OpIdx].size() == getNumLanes() &&
1385 "Expected same num of lanes across all operands");
1386 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
1387 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
1388 return OpVL;
1389 }
1390
1391 // Performs operand reordering for 2 or more operands.
1392 // The original operands are in OrigOps[OpIdx][Lane].
1393 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
1394 void reorder() {
1395 unsigned NumOperands = getNumOperands();
1396 unsigned NumLanes = getNumLanes();
1397 // Each operand has its own mode. We are using this mode to help us select
1398 // the instructions for each lane, so that they match best with the ones
1399 // we have selected so far.
1400 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
1401
1402 // This is a greedy single-pass algorithm. We are going over each lane
1403 // once and deciding on the best order right away with no back-tracking.
1404 // However, in order to increase its effectiveness, we start with the lane
1405 // that has operands that can move the least. For example, given the
1406 // following lanes:
1407 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
1408 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
1409 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
1410 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
1411 // we will start at Lane 1, since the operands of the subtraction cannot
1412 // be reordered. Then we will visit the rest of the lanes in a circular
1413 // fashion. That is, Lane 2, then Lane 0, and finally Lane 3.
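// For example, with FirstLane = 1 the loop below visits Distance 1 first
// (Lanes 2 and 0) and then Distance 2, where only Lane 3 is in bounds.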
1414
1415 // Find the first lane that we will start our search from.
1416 unsigned FirstLane = getBestLaneToStartReordering();
1417
1418 // Initialize the modes.
1419 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1420 Value *OpLane0 = getValue(OpIdx, FirstLane);
1421 // Keep track if we have instructions with all the same opcode on one
1422 // side.
1423 if (isa<LoadInst>(OpLane0))
1424 ReorderingModes[OpIdx] = ReorderingMode::Load;
1425 else if (isa<Instruction>(OpLane0)) {
1426 // Check if OpLane0 should be broadcast.
1427 if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1428 ReorderingModes[OpIdx] = ReorderingMode::Splat;
1429 else
1430 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1431 }
1432 else if (isa<Constant>(OpLane0))
1433 ReorderingModes[OpIdx] = ReorderingMode::Constant;
1434 else if (isa<Argument>(OpLane0))
1435 // Our best hope is a Splat. It may save some cost in some cases.
1436 ReorderingModes[OpIdx] = ReorderingMode::Splat;
1437 else
1438 // NOTE: This should be unreachable.
1439 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1440 }
1441
1442 // If the initial strategy fails for any of the operand indexes, then we
1443 // perform reordering again in a second pass. This helps avoid assigning
1444 // high priority to the failed strategy, and should improve reordering for
1445 // the non-failed operand indexes.
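// For example, if the Splat strategy fails for operand 0 in some lane, the
// first pass marks that operand index as Failed and the second pass redoes
// the reordering without the failed strategy competing for matches.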
1446 for (int Pass = 0; Pass != 2; ++Pass) {
1447 // Skip the second pass if the first pass did not fail.
1448 bool StrategyFailed = false;
1449 // Mark all operand data as free to use.
1450 clearUsed();
1451 // We keep the original operand order for the FirstLane, so reorder the
1452 // rest of the lanes. We are visiting the nodes in a circular fashion,
1453 // using FirstLane as the center point and increasing the radius
1454 // distance.
1455 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1456 // Visit the lane on the right and then the lane on the left.
1457 for (int Direction : {+1, -1}) {
1458 int Lane = FirstLane + Direction * Distance;
1459 if (Lane < 0 || Lane >= (int)NumLanes)
1460 continue;
1461 int LastLane = Lane - Direction;
1462 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1463 "Out of bounds");
1464 // Look for a good match for each operand.
1465 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1466 // Search for the operand that matches SortedOps[OpIdx][Lane-1].
1467 Optional<unsigned> BestIdx =
1468 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
1469 // By not selecting a value, we allow the operands that follow to
1470 // select a better matching value. We will get a non-null value in
1471 // the next run of getBestOperand().
1472 if (BestIdx) {
1473 // Swap the current operand with the one returned by
1474 // getBestOperand().
1475 swap(OpIdx, BestIdx.getValue(), Lane);
1476 } else {
1477 // We failed to find a best operand, set mode to 'Failed'.
1478 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1479 // Enable the second pass.
1480 StrategyFailed = true;
1481 }
1482 }
1483 }
1484 }
1485 // Skip second pass if the strategy did not fail.
1486 if (!StrategyFailed)
1487 break;
1488 }
1489 }
1490
1491 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1492 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1493 switch (RMode) {
1494 case ReorderingMode::Load:
1495 return "Load";
1496 case ReorderingMode::Opcode:
1497 return "Opcode";
1498 case ReorderingMode::Constant:
1499 return "Constant";
1500 case ReorderingMode::Splat:
1501 return "Splat";
1502 case ReorderingMode::Failed:
1503 return "Failed";
1504 }
1505 llvm_unreachable("Unimplemented Reordering Type");
1506 }
1507
1508 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1509 raw_ostream &OS) {
1510 return OS << getModeStr(RMode);
1511 }
1512
1513 /// Debug print.
1514 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1515 printMode(RMode, dbgs());
1516 }
1517
1518 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1519 return printMode(RMode, OS);
1520 }
1521
1522 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1523 const unsigned Indent = 2;
1524 unsigned Cnt = 0;
1525 for (const OperandDataVec &OpDataVec : OpsVec) {
1526 OS << "Operand " << Cnt++ << "\n";
1527 for (const OperandData &OpData : OpDataVec) {
1528 OS.indent(Indent) << "{";
1529 if (Value *V = OpData.V)
1530 OS << *V;
1531 else
1532 OS << "null";
1533 OS << ", APO:" << OpData.APO << "}\n";
1534 }
1535 OS << "\n";
1536 }
1537 return OS;
1538 }
1539
1540 /// Debug print.
1541 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
1542 #endif
1543 };
1544
1545 /// Checks if the instruction is marked for deletion.
1546 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
1547
1548 /// Marks the operands of values for later deletion by replacing them with Undefs.
1549 void eraseInstructions(ArrayRef<Value *> AV);
1550
1551 ~BoUpSLP();
1552
1553 private:
1554 /// Checks if all users of \p I are the part of the vectorization tree.
1555 bool areAllUsersVectorized(Instruction *I,
1556 ArrayRef<Value *> VectorizedVals) const;
1557
1558 /// \returns the cost of the vectorizable entry.
1559 InstructionCost getEntryCost(const TreeEntry *E,
1560 ArrayRef<Value *> VectorizedVals);
1561
1562 /// This is the recursive part of buildTree.
1563 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
1564 const EdgeInfo &EI);
1565
1566 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
1567 /// be vectorized to use the original vector (or aggregate "bitcast" to a
1568 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
1569 /// returns false, setting \p CurrentOrder to either an empty vector or a
1570 /// non-identity permutation that allows the extract instructions to be reused.
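/// For example, extracts of lanes 0, 1, 2, 3 of one 4-wide vector form the
/// identity permutation, so the original vector can be reused directly.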
1571 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
1572 SmallVectorImpl<unsigned> &CurrentOrder) const;
1573
1574 /// Vectorize a single entry in the tree.
1575 Value *vectorizeTree(TreeEntry *E);
1576
1577 /// Vectorize a single entry in the tree, starting in \p VL.
1578 Value *vectorizeTree(ArrayRef<Value *> VL);
1579
1580 /// \returns the scalarization cost for this type. Scalarization in this
1581 /// context means the creation of vectors from a group of scalars.
1582 InstructionCost
1583 getGatherCost(FixedVectorType *Ty,
1584 const DenseSet<unsigned> &ShuffledIndices) const;
1585
1586 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
1587 /// tree entries.
1588 /// \returns ShuffleKind, if gathered values can be represented as shuffles of
1589 /// previous tree entries. \p Mask is filled with the shuffle mask.
1590 Optional<TargetTransformInfo::ShuffleKind>
1591 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
1592 SmallVectorImpl<const TreeEntry *> &Entries);
1593
1594 /// \returns the scalarization cost for this list of values. Assuming that
1595 /// this subtree gets vectorized, we may need to extract the values from the
1596 /// roots. This method calculates the cost of extracting the values.
1597 InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
1598
1599 /// Set the Builder insert point to one after the last instruction in
1600 /// the bundle
1601 void setInsertPointAfterBundle(const TreeEntry *E);
1602
1603 /// \returns a vector from a collection of scalars in \p VL.
1604 Value *gather(ArrayRef<Value *> VL);
1605
1606 /// \returns whether the VectorizableTree is fully vectorizable and will
1607 /// be beneficial even if the tree height is tiny.
1608 bool isFullyVectorizableTinyTree() const;
1609
1610 /// Reorder commutative or alt operands to get better probability of
1611 /// generating vectorized code.
1612 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
1613 SmallVectorImpl<Value *> &Left,
1614 SmallVectorImpl<Value *> &Right,
1615 const DataLayout &DL,
1616 ScalarEvolution &SE,
1617 const BoUpSLP &R);
1618 struct TreeEntry {
1619 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
1620 TreeEntry(VecTreeTy &Container) : Container(Container) {}
1621
1622 /// \returns true if the scalars in VL are equal to this entry.
1623 bool isSame(ArrayRef<Value *> VL) const {
1624 if (VL.size() == Scalars.size())
1625 return std::equal(VL.begin(), VL.end(), Scalars.begin());
1626 return VL.size() == ReuseShuffleIndices.size() &&
1627 std::equal(
1628 VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
1629 [this](Value *V, int Idx) { return V == Scalars[Idx]; });
1630 }
1631
1632 /// A vector of scalars.
1633 ValueList Scalars;
1634
1635 /// The Scalars are vectorized into this value. It is initialized to Null.
1636 Value *VectorizedValue = nullptr;
1637
1638 /// Do we need to gather this sequence or vectorize it
1639 /// (either with vector instruction or with scatter/gather
1640 /// intrinsics for store/load)?
1641 enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
1642 EntryState State;
1643
1644 /// Does this sequence require some shuffling?
1645 SmallVector<int, 4> ReuseShuffleIndices;
1646
1647 /// Does this entry require reordering?
1648 SmallVector<unsigned, 4> ReorderIndices;
1649
1650 /// Points back to the VectorizableTree.
1651 ///
1652 /// Only used for Graphviz right now. Unfortunately GraphTraits::NodeRef has
1653 /// to be a pointer and needs to be able to initialize the child iterator.
1654 /// Thus we need a reference back to the container to translate the indices
1655 /// to entries.
1656 VecTreeTy &Container;
1657
1658 /// The TreeEntry index containing the user of this entry. We can actually
1659 /// have multiple users so the data structure is not truly a tree.
1660 SmallVector<EdgeInfo, 1> UserTreeIndices;
1661
1662 /// The index of this treeEntry in VectorizableTree.
1663 int Idx = -1;
1664
1665 private:
1666 /// The operands of each instruction in each lane Operands[op_index][lane].
1667 /// Note: This helps avoid the replication of the code that performs the
1668 /// reordering of operands during buildTree_rec() and vectorizeTree().
1669 SmallVector<ValueList, 2> Operands;
1670
1671 /// The main/alternate instruction.
1672 Instruction *MainOp = nullptr;
1673 Instruction *AltOp = nullptr;
1674
1675 public:
1676 /// Set this bundle's \p OpIdx'th operand to \p OpVL.
1677 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
1678 if (Operands.size() < OpIdx + 1)
1679 Operands.resize(OpIdx + 1);
1680 assert(Operands[OpIdx].empty() && "Already resized?");
1681 Operands[OpIdx].resize(Scalars.size());
1682 for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
1683 Operands[OpIdx][Lane] = OpVL[Lane];
1684 }
1685
1686 /// Set the operands of this bundle in their original order.
1687 void setOperandsInOrder() {
1688 assert(Operands.empty() && "Already initialized?");
1689 auto *I0 = cast<Instruction>(Scalars[0]);
1690 Operands.resize(I0->getNumOperands());
1691 unsigned NumLanes = Scalars.size();
1692 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
1693 OpIdx != NumOperands; ++OpIdx) {
1694 Operands[OpIdx].resize(NumLanes);
1695 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1696 auto *I = cast<Instruction>(Scalars[Lane]);
1697 assert(I->getNumOperands() == NumOperands &&
1698 "Expected same number of operands");
1699 Operands[OpIdx][Lane] = I->getOperand(OpIdx);
1700 }
1701 }
1702 }
1703
1704 /// \returns the \p OpIdx operand of this TreeEntry.
1705 ValueList &getOperand(unsigned OpIdx) {
1706 assert(OpIdx < Operands.size() && "Off bounds");
1707 return Operands[OpIdx];
1708 }
1709
1710 /// \returns the number of operands.
1711 unsigned getNumOperands() const { return Operands.size(); }
1712
1713 /// \return the single \p OpIdx operand.
1714 Value *getSingleOperand(unsigned OpIdx) const {
1715 assert(OpIdx < Operands.size() && "Off bounds");
1716 assert(!Operands[OpIdx].empty() && "No operand available");
1717 return Operands[OpIdx][0];
1718 }
1719
1720 /// Some of the instructions in the list have alternate opcodes.
1721 bool isAltShuffle() const {
1722 return getOpcode() != getAltOpcode();
1723 }
1724
1725 bool isOpcodeOrAlt(Instruction *I) const {
1726 unsigned CheckedOpcode = I->getOpcode();
1727 return (getOpcode() == CheckedOpcode ||
1728 getAltOpcode() == CheckedOpcode);
1729 }
1730
1731 /// Chooses the correct key for scheduling data. If \p Op has the same (or
1732 /// alternate) opcode as the main/alternate instruction of this entry, the
1733 /// key is \p Op. Otherwise the key is MainOp.
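/// For example, in an add/sub alternating bundle this returns \p Op for any
/// add or sub instruction and falls back to MainOp for everything else.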
1734 Value *isOneOf(Value *Op) const {
1735 auto *I = dyn_cast<Instruction>(Op);
1736 if (I && isOpcodeOrAlt(I))
1737 return Op;
1738 return MainOp;
1739 }
1740
1741 void setOperations(const InstructionsState &S) {
1742 MainOp = S.MainOp;
1743 AltOp = S.AltOp;
1744 }
1745
1746 Instruction *getMainOp() const {
1747 return MainOp;
1748 }
1749
1750 Instruction *getAltOp() const {
1751 return AltOp;
1752 }
1753
1754 /// The main/alternate opcodes for the list of instructions.
1755 unsigned getOpcode() const {
1756 return MainOp ? MainOp->getOpcode() : 0;
1757 }
1758
1759 unsigned getAltOpcode() const {
1760 return AltOp ? AltOp->getOpcode() : 0;
1761 }
1762
1763 /// Update operations state of this entry if reorder occurred.
1764 bool updateStateIfReorder() {
1765 if (ReorderIndices.empty())
1766 return false;
1767 InstructionsState S = getSameOpcode(Scalars, ReorderIndices.front());
1768 setOperations(S);
1769 return true;
1770 }
1771 /// When ReuseShuffleIndices is empty it just returns the position of \p V
1772 /// within the vector of Scalars. Otherwise, it remaps the position via its reuse index.
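/// For example, with Scalars = {%a, %b} and ReuseShuffleIndices = {0, 0, 1, 1},
/// %b is found at position 1 in Scalars and is remapped to lane 2, the first
/// reuse index that refers to position 1.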
1773 int findLaneForValue(Value *V) const {
1774 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V));
1775 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
1776 if (!ReuseShuffleIndices.empty()) {
1777 FoundLane = std::distance(ReuseShuffleIndices.begin(),
1778 find(ReuseShuffleIndices, FoundLane));
1779 }
1780 return FoundLane;
1781 }
1782
1783 #ifndef NDEBUG
1784 /// Debug printer.
1785 LLVM_DUMP_METHOD void dump() const {
1786 dbgs() << Idx << ".\n";
1787 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
1788 dbgs() << "Operand " << OpI << ":\n";
1789 for (const Value *V : Operands[OpI])
1790 dbgs().indent(2) << *V << "\n";
1791 }
1792 dbgs() << "Scalars: \n";
1793 for (Value *V : Scalars)
1794 dbgs().indent(2) << *V << "\n";
1795 dbgs() << "State: ";
1796 switch (State) {
1797 case Vectorize:
1798 dbgs() << "Vectorize\n";
1799 break;
1800 case ScatterVectorize:
1801 dbgs() << "ScatterVectorize\n";
1802 break;
1803 case NeedToGather:
1804 dbgs() << "NeedToGather\n";
1805 break;
1806 }
1807 dbgs() << "MainOp: ";
1808 if (MainOp)
1809 dbgs() << *MainOp << "\n";
1810 else
1811 dbgs() << "NULL\n";
1812 dbgs() << "AltOp: ";
1813 if (AltOp)
1814 dbgs() << *AltOp << "\n";
1815 else
1816 dbgs() << "NULL\n";
1817 dbgs() << "VectorizedValue: ";
1818 if (VectorizedValue)
1819 dbgs() << *VectorizedValue << "\n";
1820 else
1821 dbgs() << "NULL\n";
1822 dbgs() << "ReuseShuffleIndices: ";
1823 if (ReuseShuffleIndices.empty())
1824 dbgs() << "Empty";
1825 else
1826 for (unsigned ReuseIdx : ReuseShuffleIndices)
1827 dbgs() << ReuseIdx << ", ";
1828 dbgs() << "\n";
1829 dbgs() << "ReorderIndices: ";
1830 for (unsigned ReorderIdx : ReorderIndices)
1831 dbgs() << ReorderIdx << ", ";
1832 dbgs() << "\n";
1833 dbgs() << "UserTreeIndices: ";
1834 for (const auto &EInfo : UserTreeIndices)
1835 dbgs() << EInfo << ", ";
1836 dbgs() << "\n";
1837 }
1838 #endif
1839 };
1840
1841 #ifndef NDEBUG
1842 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
1843 InstructionCost VecCost,
1844 InstructionCost ScalarCost) const {
1845 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
1846 dbgs() << "SLP: Costs:\n";
1847 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n";
1848 dbgs() << "SLP: VectorCost = " << VecCost << "\n";
1849 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n";
1850 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " <<
1851 ReuseShuffleCost + VecCost - ScalarCost << "\n";
1852 }
1853 #endif
1854
1855 /// Create a new VectorizableTree entry.
1856 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
1857 const InstructionsState &S,
1858 const EdgeInfo &UserTreeIdx,
1859 ArrayRef<unsigned> ReuseShuffleIndices = None,
1860 ArrayRef<unsigned> ReorderIndices = None) {
1861 TreeEntry::EntryState EntryState =
1862 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
1863 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
1864 ReuseShuffleIndices, ReorderIndices);
1865 }
1866
1867 TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
1868 TreeEntry::EntryState EntryState,
1869 Optional<ScheduleData *> Bundle,
1870 const InstructionsState &S,
1871 const EdgeInfo &UserTreeIdx,
1872 ArrayRef<unsigned> ReuseShuffleIndices = None,
1873 ArrayRef<unsigned> ReorderIndices = None) {
1874 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
1875 (Bundle && EntryState != TreeEntry::NeedToGather)) &&
1876 "Need to vectorize gather entry?");
1877 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
1878 TreeEntry *Last = VectorizableTree.back().get();
1879 Last->Idx = VectorizableTree.size() - 1;
1880 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
1881 Last->State = EntryState;
1882 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
1883 ReuseShuffleIndices.end());
1884 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
1885 Last->setOperations(S);
1886 if (Last->State != TreeEntry::NeedToGather) {
1887 for (Value *V : VL) {
1888 assert(!getTreeEntry(V) && "Scalar already in tree!");
1889 ScalarToTreeEntry[V] = Last;
1890 }
1891 // Update the scheduler bundle to point to this TreeEntry.
1892 unsigned Lane = 0;
1893 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember;
1894 BundleMember = BundleMember->NextInBundle) {
1895 BundleMember->TE = Last;
1896 BundleMember->Lane = Lane;
1897 ++Lane;
1898 }
1899 assert((!Bundle.getValue() || Lane == VL.size()) &&
1900 "Bundle and VL out of sync");
1901 } else {
1902 MustGather.insert(VL.begin(), VL.end());
1903 }
1904
1905 if (UserTreeIdx.UserTE)
1906 Last->UserTreeIndices.push_back(UserTreeIdx);
1907
1908 return Last;
1909 }
1910
1911 /// -- Vectorization State --
1912 /// Holds all of the tree entries.
1913 TreeEntry::VecTreeTy VectorizableTree;
1914
1915 #ifndef NDEBUG
1916 /// Debug printer.
1917 LLVM_DUMP_METHOD void dumpVectorizableTree() const {
1918 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
1919 VectorizableTree[Id]->dump();
1920 dbgs() << "\n";
1921 }
1922 }
1923 #endif
1924
1925 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
1926
1927 const TreeEntry *getTreeEntry(Value *V) const {
1928 return ScalarToTreeEntry.lookup(V);
1929 }
1930
1931 /// Maps a specific scalar to its tree entry.
1932 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
1933
1934 /// Maps a value to the proposed vectorizable size.
1935 SmallDenseMap<Value *, unsigned> InstrElementSize;
1936
1937 /// A list of scalars that we found that we need to keep as scalars.
1938 ValueSet MustGather;
1939
1940 /// This POD struct describes one external user in the vectorized tree.
1941 struct ExternalUser {
1942 ExternalUser(Value *S, llvm::User *U, int L)
1943 : Scalar(S), User(U), Lane(L) {}
1944
1945 // Which scalar in our function.
1946 Value *Scalar;
1947
1948 // Which user that uses the scalar.
1949 llvm::User *User;
1950
1951 // Which lane does the scalar belong to.
1952 int Lane;
1953 };
1954 using UserList = SmallVector<ExternalUser, 16>;
1955
1956 /// Checks if two instructions may access the same memory.
1957 ///
1958 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
1959 /// is invariant in the calling loop.
1960 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
1961 Instruction *Inst2) {
1962 // First check if the result is already in the cache.
1963 AliasCacheKey key = std::make_pair(Inst1, Inst2);
1964 Optional<bool> &result = AliasCache[key];
1965 if (result.hasValue()) {
1966 return result.getValue();
1967 }
1968 MemoryLocation Loc2 = getLocation(Inst2, AA);
1969 bool aliased = true;
1970 if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
1971 // Do the alias check.
1972 aliased = !AA->isNoAlias(Loc1, Loc2);
1973 }
1974 // Store the result in the cache.
1975 result = aliased;
1976 return aliased;
1977 }
1978
1979 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
1980
1981 /// Cache for alias results.
1982 /// TODO: consider moving this to the AliasAnalysis itself.
1983 DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
1984
1985 /// Removes an instruction from its block and eventually deletes it.
1986 /// It's like Instruction::eraseFromParent() except that the actual deletion
1987 /// is delayed until BoUpSLP is destructed.
1988 /// This is required to ensure that there are no incorrect collisions in the
1989 /// AliasCache, which can happen if a new instruction is allocated at the
1990 /// same address as a previously deleted instruction.
1991 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
1992 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
1993 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
1994 }
1995
1996 /// Temporary store for deleted instructions. Instructions will be deleted
1997 /// eventually when the BoUpSLP is destructed.
1998 DenseMap<Instruction *, bool> DeletedInstructions;
1999
2000 /// A list of values that need to be extracted out of the tree.
2001 /// This list holds pairs of (Internal Scalar : External User). External User
2002 /// can be nullptr, it means that this Internal Scalar will be used later,
2003 /// after vectorization.
2004 UserList ExternalUses;
2005
2006 /// Values used only by @llvm.assume calls.
2007 SmallPtrSet<const Value *, 32> EphValues;
2008
2009 /// Holds all of the instructions that we gathered.
2010 SetVector<Instruction *> GatherSeq;
2011
2012 /// A list of blocks that we are going to CSE.
2013 SetVector<BasicBlock *> CSEBlocks;
2014
2015 /// Contains all scheduling relevant data for an instruction.
2016 /// A ScheduleData either represents a single instruction or a member of an
2017 /// instruction bundle (= a group of instructions which is combined into a
2018 /// vector instruction).
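/// For example, when two loads {load %p0; load %p1} are bundled, each load
/// gets its own ScheduleData: both point to the head of the bundle through
/// FirstInBundle and are chained through NextInBundle.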
2019 struct ScheduleData {
2020 // The initial value for the dependency counters. It means that the
2021 // dependencies are not calculated yet.
2022 enum { InvalidDeps = -1 };
2023
2024 ScheduleData() = default;
2025
2026 void init(int BlockSchedulingRegionID, Value *OpVal) {
2027 FirstInBundle = this;
2028 NextInBundle = nullptr;
2029 NextLoadStore = nullptr;
2030 IsScheduled = false;
2031 SchedulingRegionID = BlockSchedulingRegionID;
2032 UnscheduledDepsInBundle = UnscheduledDeps;
2033 clearDependencies();
2034 OpValue = OpVal;
2035 TE = nullptr;
2036 Lane = -1;
2037 }
2038
2039 /// Returns true if the dependency information has been calculated.
2040 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
2041
2042 /// Returns true for single instructions and for bundle representatives
2043 /// (= the head of a bundle).
2044 bool isSchedulingEntity() const { return FirstInBundle == this; }
2045
2046 /// Returns true if it represents an instruction bundle and not only a
2047 /// single instruction.
2048 bool isPartOfBundle() const {
2049 return NextInBundle != nullptr || FirstInBundle != this;
2050 }
2051
2052 /// Returns true if it is ready for scheduling, i.e. it has no more
2053 /// unscheduled depending instructions/bundles.
2054 bool isReady() const {
2055 assert(isSchedulingEntity() &&
2056 "can't consider non-scheduling entity for ready list");
2057 return UnscheduledDepsInBundle == 0 && !IsScheduled;
2058 }
2059
2060 /// Modifies the number of unscheduled dependencies, also updating it for
2061 /// the whole bundle.
2062 int incrementUnscheduledDeps(int Incr) {
2063 UnscheduledDeps += Incr;
2064 return FirstInBundle->UnscheduledDepsInBundle += Incr;
2065 }
2066
2067 /// Sets the number of unscheduled dependencies to the number of
2068 /// dependencies.
2069 void resetUnscheduledDeps() {
2070 incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
2071 }
2072
2073 /// Clears all dependency information.
2074 void clearDependencies() {
2075 Dependencies = InvalidDeps;
2076 resetUnscheduledDeps();
2077 MemoryDependencies.clear();
2078 }
2079
2080 void dump(raw_ostream &os) const {
2081 if (!isSchedulingEntity()) {
2082 os << "/ " << *Inst;
2083 } else if (NextInBundle) {
2084 os << '[' << *Inst;
2085 ScheduleData *SD = NextInBundle;
2086 while (SD) {
2087 os << ';' << *SD->Inst;
2088 SD = SD->NextInBundle;
2089 }
2090 os << ']';
2091 } else {
2092 os << *Inst;
2093 }
2094 }
2095
2096 Instruction *Inst = nullptr;
2097
2098 /// Points to the head in an instruction bundle (and always to this for
2099 /// single instructions).
2100 ScheduleData *FirstInBundle = nullptr;
2101
2102 /// Singly linked list of all instructions in a bundle. Null if it is a
2103 /// single instruction.
2104 ScheduleData *NextInBundle = nullptr;
2105
2106 /// Singly linked list of all memory instructions (e.g. load, store, call)
2107 /// in the block - until the end of the scheduling region.
2108 ScheduleData *NextLoadStore = nullptr;
2109
2110 /// The dependent memory instructions.
2111 /// This list is derived on demand in calculateDependencies().
2112 SmallVector<ScheduleData *, 4> MemoryDependencies;
2113
2114 /// This ScheduleData is in the current scheduling region if this matches
2115 /// the current SchedulingRegionID of BlockScheduling.
2116 int SchedulingRegionID = 0;
2117
2118 /// Used for getting a "good" final ordering of instructions.
2119 int SchedulingPriority = 0;
2120
2121 /// The number of dependencies. Consists of the number of users of the
2122 /// instruction plus the number of dependent memory instructions (if any).
2123 /// This value is calculated on demand.
2124 /// If InvalidDeps, the number of dependencies is not calculated yet.
2125 int Dependencies = InvalidDeps;
2126
2127 /// The number of dependencies minus the number of dependencies of scheduled
2128 /// instructions. As soon as this is zero, the instruction/bundle gets ready
2129 /// for scheduling.
2130 /// Note that this is negative as long as Dependencies is not calculated.
2131 int UnscheduledDeps = InvalidDeps;
2132
2133 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
2134 /// single instructions.
2135 int UnscheduledDepsInBundle = InvalidDeps;
2136
2137 /// True if this instruction is scheduled (or considered as scheduled in the
2138 /// dry-run).
2139 bool IsScheduled = false;
2140
2141 /// Opcode of the current instruction in the schedule data.
2142 Value *OpValue = nullptr;
2143
2144 /// The TreeEntry that this instruction corresponds to.
2145 TreeEntry *TE = nullptr;
2146
2147 /// The lane of this node in the TreeEntry.
2148 int Lane = -1;
2149 };
2150
2151 #ifndef NDEBUG
2152 friend inline raw_ostream &operator<<(raw_ostream &os,
2153 const BoUpSLP::ScheduleData &SD) {
2154 SD.dump(os);
2155 return os;
2156 }
2157 #endif
2158
2159 friend struct GraphTraits<BoUpSLP *>;
2160 friend struct DOTGraphTraits<BoUpSLP *>;
2161
2162 /// Contains all scheduling data for a basic block.
2163 struct BlockScheduling {
2164 BlockScheduling(BasicBlock *BB)
2165 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2166
2167 void clear() {
2168 ReadyInsts.clear();
2169 ScheduleStart = nullptr;
2170 ScheduleEnd = nullptr;
2171 FirstLoadStoreInRegion = nullptr;
2172 LastLoadStoreInRegion = nullptr;
2173
2174 // Reduce the maximum schedule region size by the size of the
2175 // previous scheduling run.
2176 ScheduleRegionSizeLimit -= ScheduleRegionSize;
2177 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2178 ScheduleRegionSizeLimit = MinScheduleRegionSize;
2179 ScheduleRegionSize = 0;
2180
2181 // Make a new scheduling region, i.e. all existing ScheduleData is not
2182 // in the new region yet.
2183 ++SchedulingRegionID;
2184 }
2185
2186 ScheduleData *getScheduleData(Value *V) {
2187 ScheduleData *SD = ScheduleDataMap[V];
2188 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2189 return SD;
2190 return nullptr;
2191 }
2192
2193 ScheduleData *getScheduleData(Value *V, Value *Key) {
2194 if (V == Key)
2195 return getScheduleData(V);
2196 auto I = ExtraScheduleDataMap.find(V);
2197 if (I != ExtraScheduleDataMap.end()) {
2198 ScheduleData *SD = I->second[Key];
2199 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2200 return SD;
2201 }
2202 return nullptr;
2203 }
2204
2205 bool isInSchedulingRegion(ScheduleData *SD) const {
2206 return SD->SchedulingRegionID == SchedulingRegionID;
2207 }
2208
2209 /// Marks an instruction as scheduled and puts all dependent ready
2210 /// instructions into the ready-list.
2211 template <typename ReadyListType>
2212 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
2213 SD->IsScheduled = true;
2214 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
2215
2216 ScheduleData *BundleMember = SD;
2217 while (BundleMember) {
2218 if (BundleMember->Inst != BundleMember->OpValue) {
2219 BundleMember = BundleMember->NextInBundle;
2220 continue;
2221 }
2222 // Handle the def-use chain dependencies.
2223
2224 // Decrement the unscheduled counter and insert to ready list if ready.
2225 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2226 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2227 if (OpDef && OpDef->hasValidDependencies() &&
2228 OpDef->incrementUnscheduledDeps(-1) == 0) {
2229 // There are no more unscheduled dependencies after
2230 // decrementing, so we can put the dependent instruction
2231 // into the ready list.
2232 ScheduleData *DepBundle = OpDef->FirstInBundle;
2233 assert(!DepBundle->IsScheduled &&
2234 "already scheduled bundle gets ready");
2235 ReadyList.insert(DepBundle);
2236 LLVM_DEBUG(dbgs()
2237 << "SLP: gets ready (def): " << *DepBundle << "\n");
2238 }
2239 });
2240 };
2241
2242 // If BundleMember is a vector bundle, its operands may have been
2243 // reordered during buildTree(). We therefore need to get its operands
2244 // through the TreeEntry.
2245 if (TreeEntry *TE = BundleMember->TE) {
2246 int Lane = BundleMember->Lane;
2247 assert(Lane >= 0 && "Lane not set");
2248
2249 // Since vectorization tree is being built recursively this assertion
2250 // ensures that the tree entry has all operands set before reaching
2251 // this code. Couple of exceptions known at the moment are extracts
2252 // where their second (immediate) operand is not added. Since
2253 // immediates do not affect scheduler behavior this is considered
2254 // okay.
2255 auto *In = TE->getMainOp();
2256 assert(In &&
2257 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2258 In->getNumOperands() == TE->getNumOperands()) &&
2259 "Missed TreeEntry operands?");
2260 (void)In; // fake use to avoid build failure when assertions disabled
2261
2262 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
2263 OpIdx != NumOperands; ++OpIdx)
2264 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
2265 DecrUnsched(I);
2266 } else {
2267 // If BundleMember is a stand-alone instruction, no operand reordering
2268 // has taken place, so we directly access its operands.
2269 for (Use &U : BundleMember->Inst->operands())
2270 if (auto *I = dyn_cast<Instruction>(U.get()))
2271 DecrUnsched(I);
2272 }
2273 // Handle the memory dependencies.
2274 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
2275 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
2276 // There are no more unscheduled dependencies after decrementing,
2277 // so we can put the dependent instruction into the ready list.
2278 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
2279 assert(!DepBundle->IsScheduled &&
2280 "already scheduled bundle gets ready");
2281 ReadyList.insert(DepBundle);
2282 LLVM_DEBUG(dbgs()
2283 << "SLP: gets ready (mem): " << *DepBundle << "\n");
2284 }
2285 }
2286 BundleMember = BundleMember->NextInBundle;
2287 }
2288 }
2289
2290 void doForAllOpcodes(Value *V,
2291 function_ref<void(ScheduleData *SD)> Action) {
2292 if (ScheduleData *SD = getScheduleData(V))
2293 Action(SD);
2294 auto I = ExtraScheduleDataMap.find(V);
2295 if (I != ExtraScheduleDataMap.end())
2296 for (auto &P : I->second)
2297 if (P.second->SchedulingRegionID == SchedulingRegionID)
2298 Action(P.second);
2299 }
2300
2301 /// Put all instructions into the ReadyList which are ready for scheduling.
2302 template <typename ReadyListType>
2303 void initialFillReadyList(ReadyListType &ReadyList) {
2304 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2305 doForAllOpcodes(I, [&](ScheduleData *SD) {
2306 if (SD->isSchedulingEntity() && SD->isReady()) {
2307 ReadyList.insert(SD);
2308 LLVM_DEBUG(dbgs()
2309 << "SLP: initially in ready list: " << *I << "\n");
2310 }
2311 });
2312 }
2313 }
2314
2315 /// Checks if a bundle of instructions can be scheduled, i.e. has no
2316 /// cyclic dependencies. This is only a dry-run, no instructions are
2317 /// actually moved at this stage.
2318 /// \returns the scheduling bundle. The returned Optional value is non-None
2319 /// if \p VL is allowed to be scheduled.
2320 Optional<ScheduleData *>
2321 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
2322 const InstructionsState &S);
2323
2324 /// Un-bundles a group of instructions.
2325 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
2326
2327 /// Allocates schedule data chunk.
2328 ScheduleData *allocateScheduleDataChunks();
2329
2330 /// Extends the scheduling region so that V is inside the region.
2331 /// \returns true if the region size is within the limit.
2332 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
2333
2334 /// Initialize the ScheduleData structures for new instructions in the
2335 /// scheduling region.
2336 void initScheduleData(Instruction *FromI, Instruction *ToI,
2337 ScheduleData *PrevLoadStore,
2338 ScheduleData *NextLoadStore);
2339
2340 /// Updates the dependency information of a bundle and of all instructions/
2341 /// bundles which depend on the original bundle.
2342 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
2343 BoUpSLP *SLP);
2344
2345 /// Sets all instructions in the scheduling region to un-scheduled.
2346 void resetSchedule();
2347
2348 BasicBlock *BB;
2349
2350 /// Simple memory allocation for ScheduleData.
2351 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
2352
2353 /// The size of a ScheduleData array in ScheduleDataChunks.
2354 int ChunkSize;
2355
2356 /// The allocator position in the current chunk, which is the last entry
2357 /// of ScheduleDataChunks.
2358 int ChunkPos;
2359
2360 /// Attaches ScheduleData to Instruction.
2361 /// Note that the mapping survives during all vectorization iterations, i.e.
2362 /// ScheduleData structures are recycled.
2363 DenseMap<Value *, ScheduleData *> ScheduleDataMap;
2364
2365 /// Attaches ScheduleData to Instruction with the leading key.
2366 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
2367 ExtraScheduleDataMap;
2368
2369 struct ReadyList : SmallVector<ScheduleData *, 8> {
2370 void insert(ScheduleData *SD) { push_back(SD); }
2371 };
2372
2373 /// The ready-list for scheduling (only used for the dry-run).
2374 ReadyList ReadyInsts;
2375
2376 /// The first instruction of the scheduling region.
2377 Instruction *ScheduleStart = nullptr;
2378
2379 /// The first instruction _after_ the scheduling region.
2380 Instruction *ScheduleEnd = nullptr;
2381
2382 /// The first memory accessing instruction in the scheduling region
2383 /// (can be null).
2384 ScheduleData *FirstLoadStoreInRegion = nullptr;
2385
2386 /// The last memory accessing instruction in the scheduling region
2387 /// (can be null).
2388 ScheduleData *LastLoadStoreInRegion = nullptr;
2389
2390 /// The current size of the scheduling region.
2391 int ScheduleRegionSize = 0;
2392
2393 /// The maximum size allowed for the scheduling region.
2394 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
2395
2396 /// The ID of the scheduling region. For a new vectorization iteration this
2397 /// is incremented which "removes" all ScheduleData from the region.
2398 // Make sure that the initial SchedulingRegionID is greater than the
2399 // initial SchedulingRegionID in ScheduleData (which is 0).
2400 int SchedulingRegionID = 1;
2401 };
2402
2403 /// Attaches the BlockScheduling structures to basic blocks.
2404 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
2405
2406 /// Performs the "real" scheduling. Done before vectorization is actually
2407 /// performed in a basic block.
2408 void scheduleBlock(BlockScheduling *BS);
2409
2410 /// List of users to ignore during scheduling and that don't need extracting.
2411 ArrayRef<Value *> UserIgnoreList;
2412
2413 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
2414 /// sorted SmallVectors of unsigned.
2415 struct OrdersTypeDenseMapInfo {
2416 static OrdersType getEmptyKey() {
2417 OrdersType V;
2418 V.push_back(~1U);
2419 return V;
2420 }
2421
2422 static OrdersType getTombstoneKey() {
2423 OrdersType V;
2424 V.push_back(~2U);
2425 return V;
2426 }
2427
2428 static unsigned getHashValue(const OrdersType &V) {
2429 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
2430 }
2431
2432 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
2433 return LHS == RHS;
2434 }
2435 };
2436
2437 /// Contains orders of operations along with the number of bundles that have
2438 /// operations in this order. It stores only those orders that require
2439 /// reordering, if reordering is not required it is counted using \a
2440 /// NumOpsWantToKeepOriginalOrder.
2441 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder;
2442 /// Number of bundles that do not require reordering.
2443 unsigned NumOpsWantToKeepOriginalOrder = 0;
2444
2445 // Analysis and block reference.
2446 Function *F;
2447 ScalarEvolution *SE;
2448 TargetTransformInfo *TTI;
2449 TargetLibraryInfo *TLI;
2450 AAResults *AA;
2451 LoopInfo *LI;
2452 DominatorTree *DT;
2453 AssumptionCache *AC;
2454 DemandedBits *DB;
2455 const DataLayout *DL;
2456 OptimizationRemarkEmitter *ORE;
2457
2458 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
2459 unsigned MinVecRegSize; // Set by cl::opt (default: 128).
2460
2461 /// Instruction builder to construct the vectorized tree.
2462 IRBuilder<> Builder;
2463
2464 /// A map of scalar integer values to the smallest bit width with which they
2465 /// can legally be represented. The values map to (width, signed) pairs,
2466 /// where "width" indicates the minimum bit width and "signed" is True if the
2467 /// value must be signed-extended, rather than zero-extended, back to its
2468 /// original width.
2469 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
2470 };
2471
2472 } // end namespace slpvectorizer
2473
2474 template <> struct GraphTraits<BoUpSLP *> {
2475 using TreeEntry = BoUpSLP::TreeEntry;
2476
2477 /// NodeRef has to be a pointer per the GraphWriter.
2478 using NodeRef = TreeEntry *;
2479
2480 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
2481
2482 /// Add the VectorizableTree to the index iterator to be able to return
2483 /// TreeEntry pointers.
2484 struct ChildIteratorType
2485 : public iterator_adaptor_base<
2486 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
2487 ContainerTy &VectorizableTree;
2488
2489 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
2490 ContainerTy &VT)
2491 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
2492
2493 NodeRef operator*() { return I->UserTE; }
2494 };
2495
2496 static NodeRef getEntryNode(BoUpSLP &R) {
2497 return R.VectorizableTree[0].get();
2498 }
2499
2500 static ChildIteratorType child_begin(NodeRef N) {
2501 return {N->UserTreeIndices.begin(), N->Container};
2502 }
2503
2504 static ChildIteratorType child_end(NodeRef N) {
2505 return {N->UserTreeIndices.end(), N->Container};
2506 }
2507
2508 /// For the node iterator we just need to turn the TreeEntry iterator into a
2509 /// TreeEntry* iterator so that it dereferences to NodeRef.
2510 class nodes_iterator {
2511 using ItTy = ContainerTy::iterator;
2512 ItTy It;
2513
2514 public:
2515 nodes_iterator(const ItTy &It2) : It(It2) {}
2516 NodeRef operator*() { return It->get(); }
2517 nodes_iterator operator++() {
2518 ++It;
2519 return *this;
2520 }
2521 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
2522 };
2523
2524 static nodes_iterator nodes_begin(BoUpSLP *R) {
2525 return nodes_iterator(R->VectorizableTree.begin());
2526 }
2527
2528 static nodes_iterator nodes_end(BoUpSLP *R) {
2529 return nodes_iterator(R->VectorizableTree.end());
2530 }
2531
2532 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
2533 };
2534
2535 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
2536 using TreeEntry = BoUpSLP::TreeEntry;
2537
2538 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
2539
2540 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
2541 std::string Str;
2542 raw_string_ostream OS(Str);
2543 if (isSplat(Entry->Scalars)) {
2544 OS << "<splat> " << *Entry->Scalars[0];
2545 return Str;
2546 }
2547 for (auto V : Entry->Scalars) {
2548 OS << *V;
2549 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
2550 return EU.Scalar == V;
2551 }))
2552 OS << " <extract>";
2553 OS << "\n";
2554 }
2555 return Str;
2556 }
2557
2558 static std::string getNodeAttributes(const TreeEntry *Entry,
2559 const BoUpSLP *) {
2560 if (Entry->State == TreeEntry::NeedToGather)
2561 return "color=red";
2562 return "";
2563 }
2564 };
2565
2566 } // end namespace llvm
2567
2568 BoUpSLP::~BoUpSLP() {
2569 for (const auto &Pair : DeletedInstructions) {
2570 // Replace operands of ignored instructions with Undefs in case they were
2571 // marked for deletion.
2572 if (Pair.getSecond()) {
2573 Value *Undef = UndefValue::get(Pair.getFirst()->getType());
2574 Pair.getFirst()->replaceAllUsesWith(Undef);
2575 }
2576 Pair.getFirst()->dropAllReferences();
2577 }
2578 for (const auto &Pair : DeletedInstructions) {
2579 assert(Pair.getFirst()->use_empty() &&
2580 "trying to erase instruction with users.");
2581 Pair.getFirst()->eraseFromParent();
2582 }
2583 #ifdef EXPENSIVE_CHECKS
2584 // If we could guarantee that this call is not extremely slow, we could
2585 // remove the ifdef limitation (see PR47712).
2586 assert(!verifyFunction(*F, &dbgs()));
2587 #endif
2588 }
2589
2590 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
2591 for (auto *V : AV) {
2592 if (auto *I = dyn_cast<Instruction>(V))
2593 eraseInstruction(I, /*ReplaceOpsWithUndef=*/true);
2594 }
2595 }
2596
2597 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2598 ArrayRef<Value *> UserIgnoreLst) {
2599 ExtraValueToDebugLocsMap ExternallyUsedValues;
2600 buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
2601 }
2602
2603 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2604 ExtraValueToDebugLocsMap &ExternallyUsedValues,
2605 ArrayRef<Value *> UserIgnoreLst) {
2606 deleteTree();
2607 UserIgnoreList = UserIgnoreLst;
2608 if (!allSameType(Roots))
2609 return;
2610 buildTree_rec(Roots, 0, EdgeInfo());
2611
2612 // Collect the values that we need to extract from the tree.
2613 for (auto &TEPtr : VectorizableTree) {
2614 TreeEntry *Entry = TEPtr.get();
2615
2616 // No need to handle users of gathered values.
2617 if (Entry->State == TreeEntry::NeedToGather)
2618 continue;
2619
2620 // For each lane:
2621 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2622 Value *Scalar = Entry->Scalars[Lane];
2623 int FoundLane = Entry->findLaneForValue(Scalar);
2624
2625 // Check if the scalar is externally used as an extra arg.
2626 auto ExtI = ExternallyUsedValues.find(Scalar);
2627 if (ExtI != ExternallyUsedValues.end()) {
2628 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
2629 << Lane << " from " << *Scalar << ".\n");
2630 ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
2631 }
2632 for (User *U : Scalar->users()) {
2633 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
2634
2635 Instruction *UserInst = dyn_cast<Instruction>(U);
2636 if (!UserInst)
2637 continue;
2638
2639 // Skip in-tree scalars that become vectors
2640 if (TreeEntry *UseEntry = getTreeEntry(U)) {
2641 Value *UseScalar = UseEntry->Scalars[0];
2642 // Some in-tree scalars will remain as scalar in vectorized
2643 // instructions. If that is the case, the one in Lane 0 will
2644 // be used.
2645 if (UseScalar != U ||
2646 UseEntry->State == TreeEntry::ScatterVectorize ||
2647 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
2648 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
2649 << ".\n");
2650 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
2651 continue;
2652 }
2653 }
2654
2655 // Ignore users in the user ignore list.
2656 if (is_contained(UserIgnoreList, UserInst))
2657 continue;
2658
2659 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
2660 << Lane << " from " << *Scalar << ".\n");
2661 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
2662 }
2663 }
2664 }
2665 }
2666
2667 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
2668 const EdgeInfo &UserTreeIdx) {
2669 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
2670
2671 InstructionsState S = getSameOpcode(VL);
2672 if (Depth == RecursionMaxDepth) {
2673 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
2674 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2675 return;
2676 }
2677
2678 // Don't handle scalable vectors
2679 if (S.getOpcode() == Instruction::ExtractElement &&
2680 isa<ScalableVectorType>(
2681 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) {
2682 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
2683 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2684 return;
2685 }
2686
2687 // Don't handle vectors.
2688 if (S.OpValue->getType()->isVectorTy() &&
2689 !isa<InsertElementInst>(S.OpValue)) {
2690 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
2691 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2692 return;
2693 }
2694
2695 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
2696 if (SI->getValueOperand()->getType()->isVectorTy()) {
2697 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
2698 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2699 return;
2700 }
2701
2702 // If all of the operands are identical or constant we have a simple solution.
2703 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) {
2704 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
2705 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2706 return;
2707 }
2708
2709 // We now know that this is a vector of instructions of the same type from
2710 // the same block.
2711
2712 // Don't vectorize ephemeral values.
2713 for (Value *V : VL) {
2714 if (EphValues.count(V)) {
2715 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2716 << ") is ephemeral.\n");
2717 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2718 return;
2719 }
2720 }
2721
2722 // Check if this is a duplicate of another entry.
2723 if (TreeEntry *E = getTreeEntry(S.OpValue)) {
2724 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
2725 if (!E->isSame(VL)) {
2726 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
2727 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2728 return;
2729 }
2730 // Record the reuse of the tree node. FIXME, currently this is only used to
2731 // properly draw the graph rather than for the actual vectorization.
2732 E->UserTreeIndices.push_back(UserTreeIdx);
2733 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
2734 << ".\n");
2735 return;
2736 }
2737
2738 // Check that none of the instructions in the bundle are already in the tree.
2739 for (Value *V : VL) {
2740 auto *I = dyn_cast<Instruction>(V);
2741 if (!I)
2742 continue;
2743 if (getTreeEntry(I)) {
2744 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2745 << ") is already in tree.\n");
2746 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2747 return;
2748 }
2749 }
2750
2751 // If any of the scalars is marked as a value that needs to stay scalar, then
2752 // we need to gather the scalars.
2753 // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
2754 for (Value *V : VL) {
2755 if (MustGather.count(V) || is_contained(UserIgnoreList, V)) {
2756 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
2757 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2758 return;
2759 }
2760 }
2761
2762 // Check that all of the users of the scalars that we want to vectorize are
2763 // schedulable.
2764 auto *VL0 = cast<Instruction>(S.OpValue);
2765 BasicBlock *BB = VL0->getParent();
2766
2767 if (!DT->isReachableFromEntry(BB)) {
2768 // Don't go into unreachable blocks. They may contain instructions with
2769 // dependency cycles which confuse the final scheduling.
2770 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
2771 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2772 return;
2773 }
2774
2775 // Check that every instruction appears once in this bundle.
2776 SmallVector<unsigned, 4> ReuseShuffleIndicies;
2777 SmallVector<Value *, 4> UniqueValues;
2778 DenseMap<Value *, unsigned> UniquePositions;
2779 for (Value *V : VL) {
2780 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
2781 ReuseShuffleIndicies.emplace_back(Res.first->second);
2782 if (Res.second)
2783 UniqueValues.emplace_back(V);
2784 }
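  // For example, VL = {A, B, A, B} yields UniqueValues = {A, B} and
  // ReuseShuffleIndicies = {0, 1, 0, 1}: the bundle is vectorized from the
  // unique scalars and the duplicates are recreated by a reuse shuffle.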
2785 size_t NumUniqueScalarValues = UniqueValues.size();
2786 if (NumUniqueScalarValues == VL.size()) {
2787 ReuseShuffleIndicies.clear();
2788 } else {
2789 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
2790 if (NumUniqueScalarValues <= 1 ||
2791 !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
2792 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
2793 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2794 return;
2795 }
2796 VL = UniqueValues;
2797 }
2798
2799 auto &BSRef = BlocksSchedules[BB];
2800 if (!BSRef)
2801 BSRef = std::make_unique<BlockScheduling>(BB);
2802
2803 BlockScheduling &BS = *BSRef.get();
2804
2805 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
2806 if (!Bundle) {
2807 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
2808 assert((!BS.getScheduleData(VL0) ||
2809 !BS.getScheduleData(VL0)->isPartOfBundle()) &&
2810 "tryScheduleBundle should cancelScheduling on failure");
2811 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2812 ReuseShuffleIndicies);
2813 return;
2814 }
2815 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
2816
2817 unsigned ShuffleOrOp = S.isAltShuffle() ?
2818 (unsigned) Instruction::ShuffleVector : S.getOpcode();
2819 switch (ShuffleOrOp) {
2820 case Instruction::PHI: {
2821 auto *PH = cast<PHINode>(VL0);
2822
2823 // Check for terminator values (e.g. invoke).
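      // For example, a PHI may have the result of an invoke as an incoming
      // value; the invoke is a block terminator, which cannot be swizzled
      // here.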
2824 for (Value *V : VL)
2825 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
2826 Instruction *Term = dyn_cast<Instruction>(
2827 cast<PHINode>(V)->getIncomingValueForBlock(
2828 PH->getIncomingBlock(I)));
2829 if (Term && Term->isTerminator()) {
2830 LLVM_DEBUG(dbgs()
2831 << "SLP: Need to swizzle PHINodes (terminator use).\n");
2832 BS.cancelScheduling(VL, VL0);
2833 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2834 ReuseShuffleIndicies);
2835 return;
2836 }
2837 }
2838
2839 TreeEntry *TE =
2840 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
2841 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
2842
2843 // Keeps the reordered operands to avoid code duplication.
2844 SmallVector<ValueList, 2> OperandsVec;
2845 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
2846 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) {
2847 ValueList Operands(VL.size(), PoisonValue::get(PH->getType()));
2848 TE->setOperand(I, Operands);
2849 OperandsVec.push_back(Operands);
2850 continue;
2851 }
2852 ValueList Operands;
2853 // Prepare the operand vector.
2854 for (Value *V : VL)
2855 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
2856 PH->getIncomingBlock(I)));
2857 TE->setOperand(I, Operands);
2858 OperandsVec.push_back(Operands);
2859 }
2860 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
2861 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
2862 return;
2863 }
2864 case Instruction::ExtractValue:
2865 case Instruction::ExtractElement: {
2866 OrdersType CurrentOrder;
2867 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
2868 if (Reuse) {
2869 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
2870 ++NumOpsWantToKeepOriginalOrder;
2871 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2872 ReuseShuffleIndicies);
2873 // This is a special case, as it does not gather, but at the same time
2874 // we are not extending buildTree_rec() towards the operands.
2875 ValueList Op0;
2876 Op0.assign(VL.size(), VL0->getOperand(0));
2877 VectorizableTree.back()->setOperand(0, Op0);
2878 return;
2879 }
2880 if (!CurrentOrder.empty()) {
2881 LLVM_DEBUG({
2882 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
2883 "with order";
2884 for (unsigned Idx : CurrentOrder)
2885 dbgs() << " " << Idx;
2886 dbgs() << "\n";
2887 });
2888       // Insert the new order with an initial value of 0 if it does not
2889       // exist; otherwise get the iterator to the existing one.
2890 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2891 ReuseShuffleIndicies, CurrentOrder);
2892 findRootOrder(CurrentOrder);
2893 ++NumOpsWantToKeepOrder[CurrentOrder];
2894 // This is a special case, as it does not gather, but at the same time
2895 // we are not extending buildTree_rec() towards the operands.
2896 ValueList Op0;
2897 Op0.assign(VL.size(), VL0->getOperand(0));
2898 VectorizableTree.back()->setOperand(0, Op0);
2899 return;
2900 }
2901 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
2902 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2903 ReuseShuffleIndicies);
2904 BS.cancelScheduling(VL, VL0);
2905 return;
2906 }
2907 case Instruction::InsertElement: {
2908 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");
2909
2910 // Check that we have a buildvector and not a shuffle of 2 or more
2911 // different vectors.
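      // A buildvector is a single insertelement chain, e.g.:
      //   %v0 = insertelement <4 x float> poison, float %a, i32 0
      //   %v1 = insertelement <4 x float> %v0, float %b, i32 1
      // so all but at most one insert in the bundle must feed another insert
      // in the bundle.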
2912 ValueSet SourceVectors;
2913 for (Value *V : VL)
2914 SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
2915
2916 if (count_if(VL, [&SourceVectors](Value *V) {
2917 return !SourceVectors.contains(V);
2918 }) >= 2) {
2919 // Found 2nd source vector - cancel.
2920 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
2921 "different source vectors.\n");
2922 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2923 ReuseShuffleIndicies);
2924 BS.cancelScheduling(VL, VL0);
2925 return;
2926 }
2927
2928 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx);
2929 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
2930
2931 constexpr int NumOps = 2;
2932 ValueList VectorOperands[NumOps];
2933 for (int I = 0; I < NumOps; ++I) {
2934 for (Value *V : VL)
2935 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
2936
2937 TE->setOperand(I, VectorOperands[I]);
2938 }
2939 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, 0});
2940 return;
2941 }
2942 case Instruction::Load: {
2943 // Check that a vectorized load would load the same memory as a scalar
2944 // load. For example, we don't want to vectorize loads that are smaller
2945       // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM
2946 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
2947 // from such a struct, we read/write packed bits disagreeing with the
2948 // unvectorized version.
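      // For example, an i2 has a type size of 2 bits but an alloc size of
      // 8 bits, so the check below rejects it.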
2949 Type *ScalarTy = VL0->getType();
2950
2951 if (DL->getTypeSizeInBits(ScalarTy) !=
2952 DL->getTypeAllocSizeInBits(ScalarTy)) {
2953 BS.cancelScheduling(VL, VL0);
2954 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2955 ReuseShuffleIndicies);
2956 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
2957 return;
2958 }
2959
2960 // Make sure all loads in the bundle are simple - we can't vectorize
2961 // atomic or volatile loads.
2962 SmallVector<Value *, 4> PointerOps(VL.size());
2963 auto POIter = PointerOps.begin();
2964 for (Value *V : VL) {
2965 auto *L = cast<LoadInst>(V);
2966 if (!L->isSimple()) {
2967 BS.cancelScheduling(VL, VL0);
2968 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2969 ReuseShuffleIndicies);
2970 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
2971 return;
2972 }
2973 *POIter = L->getPointerOperand();
2974 ++POIter;
2975 }
2976
2977 OrdersType CurrentOrder;
2978 // Check the order of pointer operands.
2979 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
2980 Value *Ptr0;
2981 Value *PtrN;
2982 if (CurrentOrder.empty()) {
2983 Ptr0 = PointerOps.front();
2984 PtrN = PointerOps.back();
2985 } else {
2986 Ptr0 = PointerOps[CurrentOrder.front()];
2987 PtrN = PointerOps[CurrentOrder.back()];
2988 }
2989 Optional<int> Diff = getPointersDiff(
2990 ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
2991 // Check that the sorted loads are consecutive.
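        // For example, loads from p, p+1, p+2, p+3 (in units of ScalarTy)
        // give a first-to-last distance of 3 == VL.size() - 1.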
2992 if (static_cast<unsigned>(*Diff) == VL.size() - 1) {
2993 if (CurrentOrder.empty()) {
2994             // Original loads are consecutive and do not require reordering.
2995 ++NumOpsWantToKeepOriginalOrder;
2996 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
2997 UserTreeIdx, ReuseShuffleIndicies);
2998 TE->setOperandsInOrder();
2999 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
3000 } else {
3001 // Need to reorder.
3002 TreeEntry *TE =
3003 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3004 ReuseShuffleIndicies, CurrentOrder);
3005 TE->setOperandsInOrder();
3006 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
3007 findRootOrder(CurrentOrder);
3008 ++NumOpsWantToKeepOrder[CurrentOrder];
3009 }
3010 return;
3011 }
3012 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign();
3013 for (Value *V : VL)
3014 CommonAlignment =
3015 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
3016 if (TTI->isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()),
3017 CommonAlignment)) {
3018 // Vectorizing non-consecutive loads with `llvm.masked.gather`.
3019 TreeEntry *TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle,
3020 S, UserTreeIdx, ReuseShuffleIndicies);
3021 TE->setOperandsInOrder();
3022 buildTree_rec(PointerOps, Depth + 1, {TE, 0});
3023 LLVM_DEBUG(dbgs()
3024 << "SLP: added a vector of non-consecutive loads.\n");
3025 return;
3026 }
3027 }
3028
3029 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
3030 BS.cancelScheduling(VL, VL0);
3031 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3032 ReuseShuffleIndicies);
3033 return;
3034 }
3035 case Instruction::ZExt:
3036 case Instruction::SExt:
3037 case Instruction::FPToUI:
3038 case Instruction::FPToSI:
3039 case Instruction::FPExt:
3040 case Instruction::PtrToInt:
3041 case Instruction::IntToPtr:
3042 case Instruction::SIToFP:
3043 case Instruction::UIToFP:
3044 case Instruction::Trunc:
3045 case Instruction::FPTrunc:
3046 case Instruction::BitCast: {
3047 Type *SrcTy = VL0->getOperand(0)->getType();
3048 for (Value *V : VL) {
3049 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
3050 if (Ty != SrcTy || !isValidElementType(Ty)) {
3051 BS.cancelScheduling(VL, VL0);
3052 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3053 ReuseShuffleIndicies);
3054 LLVM_DEBUG(dbgs()
3055 << "SLP: Gathering casts with different src types.\n");
3056 return;
3057 }
3058 }
3059 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3060 ReuseShuffleIndicies);
3061 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
3062
3063 TE->setOperandsInOrder();
3064 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3065 ValueList Operands;
3066 // Prepare the operand vector.
3067 for (Value *V : VL)
3068 Operands.push_back(cast<Instruction>(V)->getOperand(i));
3069
3070 buildTree_rec(Operands, Depth + 1, {TE, i});
3071 }
3072 return;
3073 }
3074 case Instruction::ICmp:
3075 case Instruction::FCmp: {
3076 // Check that all of the compares have the same predicate.
3077 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
3078 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
3079 Type *ComparedTy = VL0->getOperand(0)->getType();
3080 for (Value *V : VL) {
3081 CmpInst *Cmp = cast<CmpInst>(V);
3082 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
3083 Cmp->getOperand(0)->getType() != ComparedTy) {
3084 BS.cancelScheduling(VL, VL0);
3085 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3086 ReuseShuffleIndicies);
3087 LLVM_DEBUG(dbgs()
3088 << "SLP: Gathering cmp with different predicate.\n");
3089 return;
3090 }
3091 }
3092
3093 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3094 ReuseShuffleIndicies);
3095 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
3096
3097 ValueList Left, Right;
3098 if (cast<CmpInst>(VL0)->isCommutative()) {
3099 // Commutative predicate - collect + sort operands of the instructions
3100 // so that each side is more likely to have the same opcode.
3101 assert(P0 == SwapP0 && "Commutative Predicate mismatch");
3102 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3103 } else {
3104 // Collect operands - commute if it uses the swapped predicate.
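        // For example, with P0 == slt, an 'icmp sgt %a, %b' in the bundle is
        // treated as 'icmp slt %b, %a' by swapping its operands.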
3105 for (Value *V : VL) {
3106 auto *Cmp = cast<CmpInst>(V);
3107 Value *LHS = Cmp->getOperand(0);
3108 Value *RHS = Cmp->getOperand(1);
3109 if (Cmp->getPredicate() != P0)
3110 std::swap(LHS, RHS);
3111 Left.push_back(LHS);
3112 Right.push_back(RHS);
3113 }
3114 }
3115 TE->setOperand(0, Left);
3116 TE->setOperand(1, Right);
3117 buildTree_rec(Left, Depth + 1, {TE, 0});
3118 buildTree_rec(Right, Depth + 1, {TE, 1});
3119 return;
3120 }
3121 case Instruction::Select:
3122 case Instruction::FNeg:
3123 case Instruction::Add:
3124 case Instruction::FAdd:
3125 case Instruction::Sub:
3126 case Instruction::FSub:
3127 case Instruction::Mul:
3128 case Instruction::FMul:
3129 case Instruction::UDiv:
3130 case Instruction::SDiv:
3131 case Instruction::FDiv:
3132 case Instruction::URem:
3133 case Instruction::SRem:
3134 case Instruction::FRem:
3135 case Instruction::Shl:
3136 case Instruction::LShr:
3137 case Instruction::AShr:
3138 case Instruction::And:
3139 case Instruction::Or:
3140 case Instruction::Xor: {
3141 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3142 ReuseShuffleIndicies);
3143 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
3144
3145 // Sort operands of the instructions so that each side is more likely to
3146 // have the same opcode.
3147 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
3148 ValueList Left, Right;
3149 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3150 TE->setOperand(0, Left);
3151 TE->setOperand(1, Right);
3152 buildTree_rec(Left, Depth + 1, {TE, 0});
3153 buildTree_rec(Right, Depth + 1, {TE, 1});
3154 return;
3155 }
3156
3157 TE->setOperandsInOrder();
3158 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3159 ValueList Operands;
3160 // Prepare the operand vector.
3161 for (Value *V : VL)
3162 Operands.push_back(cast<Instruction>(V)->getOperand(i));
3163
3164 buildTree_rec(Operands, Depth + 1, {TE, i});
3165 }
3166 return;
3167 }
3168 case Instruction::GetElementPtr: {
3169 // We don't combine GEPs with complicated (nested) indexing.
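      // Only GEPs of the form 'getelementptr T, T* %base, iN %idx' (a single
      // pointer operand plus a single index) are handled.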
3170 for (Value *V : VL) {
3171 if (cast<Instruction>(V)->getNumOperands() != 2) {
3172 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
3173 BS.cancelScheduling(VL, VL0);
3174 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3175 ReuseShuffleIndicies);
3176 return;
3177 }
3178 }
3179
3180 // We can't combine several GEPs into one vector if they operate on
3181 // different types.
3182 Type *Ty0 = VL0->getOperand(0)->getType();
3183 for (Value *V : VL) {
3184 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType();
3185 if (Ty0 != CurTy) {
3186 LLVM_DEBUG(dbgs()
3187 << "SLP: not-vectorizable GEP (different types).\n");
3188 BS.cancelScheduling(VL, VL0);
3189 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3190 ReuseShuffleIndicies);
3191 return;
3192 }
3193 }
3194
3195 // We don't combine GEPs with non-constant indexes.
3196 Type *Ty1 = VL0->getOperand(1)->getType();
3197 for (Value *V : VL) {
3198 auto Op = cast<Instruction>(V)->getOperand(1);
3199 if (!isa<ConstantInt>(Op) ||
3200 (Op->getType() != Ty1 &&
3201 Op->getType()->getScalarSizeInBits() >
3202 DL->getIndexSizeInBits(
3203 V->getType()->getPointerAddressSpace()))) {
3204 LLVM_DEBUG(dbgs()
3205 << "SLP: not-vectorizable GEP (non-constant indexes).\n");
3206 BS.cancelScheduling(VL, VL0);
3207 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3208 ReuseShuffleIndicies);
3209 return;
3210 }
3211 }
3212
3213 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3214 ReuseShuffleIndicies);
3215 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
3216 TE->setOperandsInOrder();
3217 for (unsigned i = 0, e = 2; i < e; ++i) {
3218 ValueList Operands;
3219 // Prepare the operand vector.
3220 for (Value *V : VL)
3221 Operands.push_back(cast<Instruction>(V)->getOperand(i));
3222
3223 buildTree_rec(Operands, Depth + 1, {TE, i});
3224 }
3225 return;
3226 }
3227 case Instruction::Store: {
3228 // Check if the stores are consecutive or if we need to swizzle them.
3229 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
3230 // Avoid types that are padded when being allocated as scalars, while
3231 // being packed together in a vector (such as i1).
3232 if (DL->getTypeSizeInBits(ScalarTy) !=
3233 DL->getTypeAllocSizeInBits(ScalarTy)) {
3234 BS.cancelScheduling(VL, VL0);
3235 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3236 ReuseShuffleIndicies);
3237 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
3238 return;
3239 }
3240 // Make sure all stores in the bundle are simple - we can't vectorize
3241 // atomic or volatile stores.
3242 SmallVector<Value *, 4> PointerOps(VL.size());
3243 ValueList Operands(VL.size());
3244 auto POIter = PointerOps.begin();
3245 auto OIter = Operands.begin();
3246 for (Value *V : VL) {
3247 auto *SI = cast<StoreInst>(V);
3248 if (!SI->isSimple()) {
3249 BS.cancelScheduling(VL, VL0);
3250 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3251 ReuseShuffleIndicies);
3252 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
3253 return;
3254 }
3255 *POIter = SI->getPointerOperand();
3256 *OIter = SI->getValueOperand();
3257 ++POIter;
3258 ++OIter;
3259 }
3260
3261 OrdersType CurrentOrder;
3262 // Check the order of pointer operands.
3263 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
3264 Value *Ptr0;
3265 Value *PtrN;
3266 if (CurrentOrder.empty()) {
3267 Ptr0 = PointerOps.front();
3268 PtrN = PointerOps.back();
3269 } else {
3270 Ptr0 = PointerOps[CurrentOrder.front()];
3271 PtrN = PointerOps[CurrentOrder.back()];
3272 }
3273 Optional<int> Dist =
3274 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
3275 // Check that the sorted pointer operands are consecutive.
3276 if (static_cast<unsigned>(*Dist) == VL.size() - 1) {
3277 if (CurrentOrder.empty()) {
3278             // Original stores are consecutive and do not require reordering.
3279 ++NumOpsWantToKeepOriginalOrder;
3280 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
3281 UserTreeIdx, ReuseShuffleIndicies);
3282 TE->setOperandsInOrder();
3283 buildTree_rec(Operands, Depth + 1, {TE, 0});
3284 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
3285 } else {
3286 TreeEntry *TE =
3287 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3288 ReuseShuffleIndicies, CurrentOrder);
3289 TE->setOperandsInOrder();
3290 buildTree_rec(Operands, Depth + 1, {TE, 0});
3291 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
3292 findRootOrder(CurrentOrder);
3293 ++NumOpsWantToKeepOrder[CurrentOrder];
3294 }
3295 return;
3296 }
3297 }
3298
3299 BS.cancelScheduling(VL, VL0);
3300 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3301 ReuseShuffleIndicies);
3302 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
3303 return;
3304 }
3305 case Instruction::Call: {
3306 // Check if the calls are all to the same vectorizable intrinsic or
3307 // library function.
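      // For example, four calls to llvm.fabs.f32 may become a single call to
      // llvm.fabs.v4f32, or a call to a vector library routine if available.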
3308 CallInst *CI = cast<CallInst>(VL0);
3309 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3310
3311 VFShape Shape = VFShape::get(
3312 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
3313 false /*HasGlobalPred*/);
3314 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3315
3316 if (!VecFunc && !isTriviallyVectorizable(ID)) {
3317 BS.cancelScheduling(VL, VL0);
3318 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3319 ReuseShuffleIndicies);
3320 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
3321 return;
3322 }
3323 Function *F = CI->getCalledFunction();
3324 unsigned NumArgs = CI->getNumArgOperands();
3325 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
3326 for (unsigned j = 0; j != NumArgs; ++j)
3327 if (hasVectorInstrinsicScalarOpd(ID, j))
3328 ScalarArgs[j] = CI->getArgOperand(j);
3329 for (Value *V : VL) {
3330 CallInst *CI2 = dyn_cast<CallInst>(V);
3331 if (!CI2 || CI2->getCalledFunction() != F ||
3332 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
3333 (VecFunc &&
3334 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
3335 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
3336 BS.cancelScheduling(VL, VL0);
3337 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3338 ReuseShuffleIndicies);
3339 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
3340 << "\n");
3341 return;
3342 }
3343       // Some intrinsics have scalar arguments, and those arguments must be
3344       // the same in every call for the bundle to be vectorized.
3345 for (unsigned j = 0; j != NumArgs; ++j) {
3346 if (hasVectorInstrinsicScalarOpd(ID, j)) {
3347 Value *A1J = CI2->getArgOperand(j);
3348 if (ScalarArgs[j] != A1J) {
3349 BS.cancelScheduling(VL, VL0);
3350 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3351 ReuseShuffleIndicies);
3352 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
3353 << " argument " << ScalarArgs[j] << "!=" << A1J
3354 << "\n");
3355 return;
3356 }
3357 }
3358 }
3359 // Verify that the bundle operands are identical between the two calls.
3360 if (CI->hasOperandBundles() &&
3361 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
3362 CI->op_begin() + CI->getBundleOperandsEndIndex(),
3363 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
3364 BS.cancelScheduling(VL, VL0);
3365 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3366 ReuseShuffleIndicies);
3367 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
3368 << *CI << "!=" << *V << '\n');
3369 return;
3370 }
3371 }
3372
3373 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3374 ReuseShuffleIndicies);
3375 TE->setOperandsInOrder();
3376 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
3377 ValueList Operands;
3378 // Prepare the operand vector.
3379 for (Value *V : VL) {
3380 auto *CI2 = cast<CallInst>(V);
3381 Operands.push_back(CI2->getArgOperand(i));
3382 }
3383 buildTree_rec(Operands, Depth + 1, {TE, i});
3384 }
3385 return;
3386 }
3387 case Instruction::ShuffleVector: {
3388       // If this is not an alternate sequence of opcodes like add-sub,
3389       // then do not vectorize this instruction.
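      // For example, VL = {a0 + b0, a1 - b1, a2 + b2, a3 - b3} is an
      // alternating add/sub sequence that becomes two vector ops plus a
      // blending shuffle.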
3390 if (!S.isAltShuffle()) {
3391 BS.cancelScheduling(VL, VL0);
3392 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3393 ReuseShuffleIndicies);
3394         LLVM_DEBUG(dbgs() << "SLP: ShuffleVector is not vectorized.\n");
3395 return;
3396 }
3397 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3398 ReuseShuffleIndicies);
3399 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
3400
3401 // Reorder operands if reordering would enable vectorization.
3402 if (isa<BinaryOperator>(VL0)) {
3403 ValueList Left, Right;
3404 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3405 TE->setOperand(0, Left);
3406 TE->setOperand(1, Right);
3407 buildTree_rec(Left, Depth + 1, {TE, 0});
3408 buildTree_rec(Right, Depth + 1, {TE, 1});
3409 return;
3410 }
3411
3412 TE->setOperandsInOrder();
3413 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3414 ValueList Operands;
3415 // Prepare the operand vector.
3416 for (Value *V : VL)
3417 Operands.push_back(cast<Instruction>(V)->getOperand(i));
3418
3419 buildTree_rec(Operands, Depth + 1, {TE, i});
3420 }
3421 return;
3422 }
3423 default:
3424 BS.cancelScheduling(VL, VL0);
3425 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3426 ReuseShuffleIndicies);
3427 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
3428 return;
3429 }
3430 }
3431
3432 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
3433 unsigned N = 1;
3434 Type *EltTy = T;
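  // Flatten homogeneous aggregates: e.g. T = {[2 x i32], [2 x i32]} maps to
  // N = 4 elements of type i32.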
3435
3436 while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
3437 isa<VectorType>(EltTy)) {
3438 if (auto *ST = dyn_cast<StructType>(EltTy)) {
3439 // Check that struct is homogeneous.
3440 for (const auto *Ty : ST->elements())
3441 if (Ty != *ST->element_begin())
3442 return 0;
3443 N *= ST->getNumElements();
3444 EltTy = *ST->element_begin();
3445 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
3446 N *= AT->getNumElements();
3447 EltTy = AT->getElementType();
3448 } else {
3449 auto *VT = cast<FixedVectorType>(EltTy);
3450 N *= VT->getNumElements();
3451 EltTy = VT->getElementType();
3452 }
3453 }
3454
3455 if (!isValidElementType(EltTy))
3456 return 0;
3457 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N));
3458   if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
3459 return 0;
3460 return N;
3461 }
3462
3463 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
3464 SmallVectorImpl<unsigned> &CurrentOrder) const {
3465 Instruction *E0 = cast<Instruction>(OpValue);
3466 assert(E0->getOpcode() == Instruction::ExtractElement ||
3467 E0->getOpcode() == Instruction::ExtractValue);
3468 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode");
3469 // Check if all of the extracts come from the same vector and from the
3470 // correct offset.
3471 Value *Vec = E0->getOperand(0);
3472
3473 CurrentOrder.clear();
3474
3475 // We have to extract from a vector/aggregate with the same number of elements.
3476 unsigned NElts;
3477 if (E0->getOpcode() == Instruction::ExtractValue) {
3478 const DataLayout &DL = E0->getModule()->getDataLayout();
3479 NElts = canMapToVector(Vec->getType(), DL);
3480 if (!NElts)
3481 return false;
3482 // Check if load can be rewritten as load of vector.
3483 LoadInst *LI = dyn_cast<LoadInst>(Vec);
3484 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
3485 return false;
3486 } else {
3487 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
3488 }
3489
3490 if (NElts != VL.size())
3491 return false;
3492
3493 // Check that all of the indices extract from the correct offset.
3494 bool ShouldKeepOrder = true;
3495 unsigned E = VL.size();
3496 // Assign to all items the initial value E + 1 so we can check if the extract
3497 // instruction index was used already.
3498 // Also, later we can check that all the indices are used and we have a
3499 // consecutive access in the extract instructions, by checking that no
3500 // element of CurrentOrder still has value E + 1.
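  // For example, extracts with indices {1, 0, 3, 2} from a common vector
  // produce CurrentOrder = {1, 0, 3, 2} and a return value of false: the
  // extracts are reusable but require reordering.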
3501 CurrentOrder.assign(E, E + 1);
3502 unsigned I = 0;
3503 for (; I < E; ++I) {
3504 auto *Inst = cast<Instruction>(VL[I]);
3505 if (Inst->getOperand(0) != Vec)
3506 break;
3507 Optional<unsigned> Idx = getExtractIndex(Inst);
3508 if (!Idx)
3509 break;
3510 const unsigned ExtIdx = *Idx;
3511 if (ExtIdx != I) {
3512 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1)
3513 break;
3514 ShouldKeepOrder = false;
3515 CurrentOrder[ExtIdx] = I;
3516 } else {
3517 if (CurrentOrder[I] != E + 1)
3518 break;
3519 CurrentOrder[I] = I;
3520 }
3521 }
3522 if (I < E) {
3523 CurrentOrder.clear();
3524 return false;
3525 }
3526
3527 return ShouldKeepOrder;
3528 }
3529
3530 bool BoUpSLP::areAllUsersVectorized(Instruction *I,
3531 ArrayRef<Value *> VectorizedVals) const {
3532 return (I->hasOneUse() && is_contained(VectorizedVals, I)) ||
3533 llvm::all_of(I->users(), [this](User *U) {
3534 return ScalarToTreeEntry.count(U) > 0;
3535 });
3536 }
3537
3538 static std::pair<InstructionCost, InstructionCost>
3539 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
3540 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
3541 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3542
3543 // Calculate the cost of the scalar and vector calls.
3544 SmallVector<Type *, 4> VecTys;
3545 for (Use &Arg : CI->args())
3546 VecTys.push_back(
3547 FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
3548 FastMathFlags FMF;
3549 if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
3550 FMF = FPCI->getFastMathFlags();
3551 SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3552 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
3553 dyn_cast<IntrinsicInst>(CI));
3554 auto IntrinsicCost =
3555 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
3556
3557 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
3558 VecTy->getNumElements())),
3559 false /*HasGlobalPred*/);
3560 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3561 auto LibCost = IntrinsicCost;
3562 if (!CI->isNoBuiltin() && VecFunc) {
3563 // Calculate the cost of the vector library call.
3564 // If the corresponding vector call is cheaper, return its cost.
3565 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
3566 TTI::TCK_RecipThroughput);
3567 }
3568 return {IntrinsicCost, LibCost};
3569 }
3570
3571 /// Compute the cost of creating a vector of type \p VecTy containing the
3572 /// extracted values from \p VL.
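///
/// For example, assuming 128-bit vector registers, an <8 x i32> gather has
/// NumOfParts == 2 and EltsPerVector == 4, and each block of four extracts
/// is checked for directly reusing a source register.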
3573 static InstructionCost
3574 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
3575 TargetTransformInfo::ShuffleKind ShuffleKind,
3576 ArrayRef<int> Mask, TargetTransformInfo &TTI) {
3577 unsigned NumOfParts = TTI.getNumberOfParts(VecTy);
3578
3579 if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
3580 VecTy->getNumElements() < NumOfParts)
3581 return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);
3582
3583 bool AllConsecutive = true;
3584 unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
3585 unsigned Idx = -1;
3586 InstructionCost Cost = 0;
3587
3588 // Process extracts in blocks of EltsPerVector to check if the source vector
3589 // operand can be re-used directly. If not, add the cost of creating a shuffle
3590 // to extract the values into a vector register.
3591 for (auto *V : VL) {
3592 ++Idx;
3593
3594     // Reached the start of a new vector register.
3595 if (Idx % EltsPerVector == 0) {
3596 AllConsecutive = true;
3597 continue;
3598 }
3599
3600     // Check whether all extracts for the current vector register directly
3601     // extract their values in order.
3602 unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
3603 unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
3604 AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
3605 CurrentIdx % EltsPerVector == Idx % EltsPerVector;
3606
3607 if (AllConsecutive)
3608 continue;
3609
3610 // Skip all indices, except for the last index per vector block.
3611 if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
3612 continue;
3613
3614 // If we have a series of extracts which are not consecutive and hence
3615 // cannot re-use the source vector register directly, compute the shuffle
3616     // cost to extract a vector with EltsPerVector elements.
3617 Cost += TTI.getShuffleCost(
3618 TargetTransformInfo::SK_PermuteSingleSrc,
3619 FixedVectorType::get(VecTy->getElementType(), EltsPerVector));
3620 }
3621 return Cost;
3622 }
3623
3624 /// Shuffles \p Mask in accordance with the given \p SubMask.
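/// For example, Mask = {1, 0, 3, 2} combined with SubMask = {2, 3, 0, 1}
/// yields {3, 2, 1, 0}, i.e. NewMask[I] = Mask[SubMask[I]].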
3625 static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
3626 if (SubMask.empty())
3627 return;
3628 if (Mask.empty()) {
3629 Mask.append(SubMask.begin(), SubMask.end());
3630 return;
3631 }
3632 SmallVector<int, 4> NewMask(SubMask.size(), SubMask.size());
3633 int TermValue = std::min(Mask.size(), SubMask.size());
3634 for (int I = 0, E = SubMask.size(); I < E; ++I) {
3635 if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
3636 Mask[SubMask[I]] >= TermValue) {
3637 NewMask[I] = UndefMaskElem;
3638 continue;
3639 }
3640 NewMask[I] = Mask[SubMask[I]];
3641 }
3642 Mask.swap(NewMask);
3643 }
3644
3645 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
3646 ArrayRef<Value *> VectorizedVals) {
3647 ArrayRef<Value*> VL = E->Scalars;
3648
3649 Type *ScalarTy = VL[0]->getType();
3650 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
3651 ScalarTy = SI->getValueOperand()->getType();
3652 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
3653 ScalarTy = CI->getOperand(0)->getType();
3654 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
3655 ScalarTy = IE->getOperand(1)->getType();
3656 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
3657 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3658
3659 // If we have computed a smaller type for the expression, update VecTy so
3660 // that the costs will be accurate.
3661 if (MinBWs.count(VL[0]))
3662 VecTy = FixedVectorType::get(
3663 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
3664 auto *FinalVecTy = VecTy;
3665
3666 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size();
3667 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
3668 if (NeedToShuffleReuses)
3669 FinalVecTy =
3670 FixedVectorType::get(VecTy->getElementType(), ReuseShuffleNumbers);
3671 // FIXME: it tries to fix a problem with MSVC buildbots.
3672 TargetTransformInfo &TTIRef = *TTI;
3673 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy,
3674 VectorizedVals](InstructionCost &Cost,
3675 bool IsGather) {
3676 DenseMap<Value *, int> ExtractVectorsTys;
3677 for (auto *V : VL) {
3678 // If all users of instruction are going to be vectorized and this
3679 // instruction itself is not going to be vectorized, consider this
3680 // instruction as dead and remove its cost from the final cost of the
3681 // vectorized tree.
3682 if (!areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) ||
3683 (IsGather && ScalarToTreeEntry.count(V)))
3684 continue;
3685 auto *EE = cast<ExtractElementInst>(V);
3686 unsigned Idx = *getExtractIndex(EE);
3687 if (TTIRef.getNumberOfParts(VecTy) !=
3688 TTIRef.getNumberOfParts(EE->getVectorOperandType())) {
3689 auto It =
3690 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first;
3691 It->getSecond() = std::min<int>(It->second, Idx);
3692 }
3693 // Take credit for instruction that will become dead.
3694 if (EE->hasOneUse()) {
3695 Instruction *Ext = EE->user_back();
3696 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3697 all_of(Ext->users(),
3698 [](User *U) { return isa<GetElementPtrInst>(U); })) {
3699 // Use getExtractWithExtendCost() to calculate the cost of
3700 // extractelement/ext pair.
3701 Cost -=
3702 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
3703 EE->getVectorOperandType(), Idx);
3704 // Add back the cost of s|zext which is subtracted separately.
3705 Cost += TTIRef.getCastInstrCost(
3706 Ext->getOpcode(), Ext->getType(), EE->getType(),
3707 TTI::getCastContextHint(Ext), CostKind, Ext);
3708 continue;
3709 }
3710 }
3711 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement,
3712 EE->getVectorOperandType(), Idx);
3713 }
3714 // Add a cost for subvector extracts/inserts if required.
3715 for (const auto &Data : ExtractVectorsTys) {
3716 auto *EEVTy = cast<FixedVectorType>(Data.first->getType());
3717 unsigned NumElts = VecTy->getNumElements();
3718 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) {
3719 unsigned Idx = (Data.second / NumElts) * NumElts;
3720 unsigned EENumElts = EEVTy->getNumElements();
3721 if (Idx + NumElts <= EENumElts) {
3722 Cost +=
3723 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
3724 EEVTy, None, Idx, VecTy);
3725 } else {
3726 // Need to round up the subvector type vectorization factor to avoid a
3727 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT
3728 // <= EENumElts.
3729 auto *SubVT =
3730 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
3731 Cost +=
3732 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
3733 EEVTy, None, Idx, SubVT);
3734 }
3735 } else {
3736 Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
3737 VecTy, None, 0, EEVTy);
3738 }
3739 }
3740 };
3741 if (E->State == TreeEntry::NeedToGather) {
3742 if (allConstant(VL))
3743 return 0;
3744 if (isa<InsertElementInst>(VL[0]))
3745 return InstructionCost::getInvalid();
3746 SmallVector<int> Mask;
3747 SmallVector<const TreeEntry *> Entries;
3748 Optional<TargetTransformInfo::ShuffleKind> Shuffle =
3749 isGatherShuffledEntry(E, Mask, Entries);
3750 if (Shuffle.hasValue()) {
3751 InstructionCost GatherCost = 0;
3752 if (ShuffleVectorInst::isIdentityMask(Mask)) {
3753 // Perfect match in the graph, will reuse the previously vectorized
3754 // node. Cost is 0.
3755 LLVM_DEBUG(
3756 dbgs()
3757 << "SLP: perfect diamond match for gather bundle that starts with "
3758 << *VL.front() << ".\n");
3759 if (NeedToShuffleReuses)
3760 GatherCost =
3761 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
3762 FinalVecTy, E->ReuseShuffleIndices);
3763 } else {
3764 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size()
3765 << " entries for bundle that starts with "
3766 << *VL.front() << ".\n");
3767         // Detected that instead of a gather we can emit a shuffle of one or
3768         // two previously vectorized nodes. Add the cost of the permutation
3769         // rather than the cost of a gather.
3770 ::addMask(Mask, E->ReuseShuffleIndices);
3771 GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask);
3772 }
3773 return GatherCost;
3774 }
3775 if (isSplat(VL)) {
3776       // Found a broadcast of a single scalar, so calculate the cost as a
3777       // broadcast shuffle.
3778 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy);
3779 }
3780 if (E->getOpcode() == Instruction::ExtractElement && allSameType(VL) &&
3781 allSameBlock(VL) &&
3782 !isa<ScalableVectorType>(
3783 cast<ExtractElementInst>(E->getMainOp())->getVectorOperandType())) {
3784       // Check that the gather of extractelements can be represented as just
3785       // a shuffle of one or two of the vectors the scalars are extracted from.
3786 SmallVector<int> Mask;
3787 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
3788 isShuffle(VL, Mask);
3789 if (ShuffleKind.hasValue()) {
3790         // Found a bunch of extractelement instructions that must be gathered
3791         // into a vector and can be represented as a permutation of elements
3792         // from one or two input vectors.
3793 InstructionCost Cost =
3794 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI);
3795 AdjustExtractsCost(Cost, /*IsGather=*/true);
3796 if (NeedToShuffleReuses)
3797 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
3798 FinalVecTy, E->ReuseShuffleIndices);
3799 return Cost;
3800 }
3801 }
3802 InstructionCost ReuseShuffleCost = 0;
3803 if (NeedToShuffleReuses)
3804 ReuseShuffleCost = TTI->getShuffleCost(
3805 TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices);
3806 return ReuseShuffleCost + getGatherCost(VL);
3807 }
3808 InstructionCost CommonCost = 0;
3809 SmallVector<int> Mask;
3810 if (!E->ReorderIndices.empty()) {
3811 SmallVector<int> NewMask;
3812 if (E->getOpcode() == Instruction::Store) {
3813 // For stores the order is actually a mask.
3814 NewMask.resize(E->ReorderIndices.size());
3815 copy(E->ReorderIndices, NewMask.begin());
3816 } else {
3817 inversePermutation(E->ReorderIndices, NewMask);
3818 }
3819 ::addMask(Mask, NewMask);
3820 }
3821 if (NeedToShuffleReuses)
3822 ::addMask(Mask, E->ReuseShuffleIndices);
3823 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
3824 CommonCost =
3825 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
3826 assert((E->State == TreeEntry::Vectorize ||
3827 E->State == TreeEntry::ScatterVectorize) &&
3828 "Unhandled state");
3829 assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
3830 Instruction *VL0 = E->getMainOp();
3831 unsigned ShuffleOrOp =
3832 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
3833 switch (ShuffleOrOp) {
3834 case Instruction::PHI:
3835 return 0;
3836
3837 case Instruction::ExtractValue:
3838 case Instruction::ExtractElement: {
3839       // The common cost of removing the ExtractElement/ExtractValue
3840       // instructions + the cost of shuffles, if required to reshuffle the original vector.
3841 if (NeedToShuffleReuses) {
3842 unsigned Idx = 0;
3843 for (unsigned I : E->ReuseShuffleIndices) {
3844 if (ShuffleOrOp == Instruction::ExtractElement) {
3845 auto *EE = cast<ExtractElementInst>(VL[I]);
3846 CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
3847 EE->getVectorOperandType(),
3848 *getExtractIndex(EE));
3849 } else {
3850 CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
3851 VecTy, Idx);
3852 ++Idx;
3853 }
3854 }
3855 Idx = ReuseShuffleNumbers;
3856 for (Value *V : VL) {
3857 if (ShuffleOrOp == Instruction::ExtractElement) {
3858 auto *EE = cast<ExtractElementInst>(V);
3859 CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
3860 EE->getVectorOperandType(),
3861 *getExtractIndex(EE));
3862 } else {
3863 --Idx;
3864 CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
3865 VecTy, Idx);
3866 }
3867 }
3868 }
3869 if (ShuffleOrOp == Instruction::ExtractValue) {
3870 for (unsigned I = 0, E = VL.size(); I < E; ++I) {
3871 auto *EI = cast<Instruction>(VL[I]);
3872 // Take credit for instruction that will become dead.
3873 if (EI->hasOneUse()) {
3874 Instruction *Ext = EI->user_back();
3875 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3876 all_of(Ext->users(),
3877 [](User *U) { return isa<GetElementPtrInst>(U); })) {
3878 // Use getExtractWithExtendCost() to calculate the cost of
3879 // extractelement/ext pair.
3880 CommonCost -= TTI->getExtractWithExtendCost(
3881 Ext->getOpcode(), Ext->getType(), VecTy, I);
3882 // Add back the cost of s|zext which is subtracted separately.
3883 CommonCost += TTI->getCastInstrCost(
3884 Ext->getOpcode(), Ext->getType(), EI->getType(),
3885 TTI::getCastContextHint(Ext), CostKind, Ext);
3886 continue;
3887 }
3888 }
3889 CommonCost -=
3890 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
3891 }
3892 } else {
3893 AdjustExtractsCost(CommonCost, /*IsGather=*/false);
3894 }
3895 return CommonCost;
3896 }
3897 case Instruction::InsertElement: {
3898 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
3899
3900 unsigned const NumElts = SrcVecTy->getNumElements();
3901 unsigned const NumScalars = VL.size();
3902 APInt DemandedElts = APInt::getNullValue(NumElts);
3903 // TODO: Add support for Instruction::InsertValue.
3904 unsigned Offset = UINT_MAX;
3905 bool IsIdentity = true;
3906 SmallVector<int> ShuffleMask(NumElts, UndefMaskElem);
3907 for (unsigned I = 0; I < NumScalars; ++I) {
3908 Optional<int> InsertIdx = getInsertIndex(VL[I], 0);
3909 if (!InsertIdx || *InsertIdx == UndefMaskElem)
3910 continue;
3911 unsigned Idx = *InsertIdx;
3912 DemandedElts.setBit(Idx);
3913 if (Idx < Offset) {
3914 Offset = Idx;
3915 IsIdentity &= I == 0;
3916 } else {
3917 assert(Idx >= Offset && "Failed to find vector index offset");
3918 IsIdentity &= Idx - Offset == I;
3919 }
3920 ShuffleMask[Idx] = I;
3921 }
3922 assert(Offset < NumElts && "Failed to find vector index offset");
3923
3924 InstructionCost Cost = 0;
3925 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
3926 /*Insert*/ true, /*Extract*/ false);
3927
3928 if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) {
3929 // FIXME: Replace with SK_InsertSubvector once it is properly supported.
3930 unsigned Sz = PowerOf2Ceil(Offset + NumScalars);
3931 Cost += TTI->getShuffleCost(
3932 TargetTransformInfo::SK_PermuteSingleSrc,
3933 FixedVectorType::get(SrcVecTy->getElementType(), Sz));
3934 } else if (!IsIdentity) {
3935 Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy,
3936 ShuffleMask);
3937 }
3938
3939 return Cost;
3940 }
3941 case Instruction::ZExt:
3942 case Instruction::SExt:
3943 case Instruction::FPToUI:
3944 case Instruction::FPToSI:
3945 case Instruction::FPExt:
3946 case Instruction::PtrToInt:
3947 case Instruction::IntToPtr:
3948 case Instruction::SIToFP:
3949 case Instruction::UIToFP:
3950 case Instruction::Trunc:
3951 case Instruction::FPTrunc:
3952 case Instruction::BitCast: {
3953 Type *SrcTy = VL0->getOperand(0)->getType();
3954 InstructionCost ScalarEltCost =
3955 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
3956 TTI::getCastContextHint(VL0), CostKind, VL0);
3957 if (NeedToShuffleReuses) {
3958 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3959 }
3960
3961 // Calculate the cost of this instruction.
3962 InstructionCost ScalarCost = VL.size() * ScalarEltCost;
3963
3964 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
3965 InstructionCost VecCost = 0;
3966 // Check if the values are candidates to demote.
3967 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
3968 VecCost = CommonCost + TTI->getCastInstrCost(
3969 E->getOpcode(), VecTy, SrcVecTy,
3970 TTI::getCastContextHint(VL0), CostKind, VL0);
3971 }
3972 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
3973 return VecCost - ScalarCost;
3974 }
3975 case Instruction::FCmp:
3976 case Instruction::ICmp:
3977 case Instruction::Select: {
3978 // Calculate the cost of this instruction.
3979 InstructionCost ScalarEltCost =
3980 TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
3981 CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
3982 if (NeedToShuffleReuses) {
3983 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3984 }
3985 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
3986 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3987
3988 // Check if all entries in VL are either compares or selects with compares
3989 // as condition that have the same predicates.
3990 CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
3991 bool First = true;
3992 for (auto *V : VL) {
3993 CmpInst::Predicate CurrentPred;
3994 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
3995 if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
3996 !match(V, MatchCmp)) ||
3997 (!First && VecPred != CurrentPred)) {
3998 VecPred = CmpInst::BAD_ICMP_PREDICATE;
3999 break;
4000 }
4001 First = false;
4002 VecPred = CurrentPred;
4003 }
4004
4005 InstructionCost VecCost = TTI->getCmpSelInstrCost(
4006 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
4007 // Check if it is possible and profitable to use min/max for selects in
4008 // VL.
4009 //
4010 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
4011 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
4012 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
4013 {VecTy, VecTy});
4014 InstructionCost IntrinsicCost =
4015 TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
4016 // If the selects are the only uses of the compares, they will be dead
4017 // and we can adjust the cost by removing their cost.
4018 if (IntrinsicAndUse.second)
4019 IntrinsicCost -=
4020 TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy,
4021 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4022 VecCost = std::min(VecCost, IntrinsicCost);
4023 }
4024 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
4025 return CommonCost + VecCost - ScalarCost;
4026 }
4027 case Instruction::FNeg:
4028 case Instruction::Add:
4029 case Instruction::FAdd:
4030 case Instruction::Sub:
4031 case Instruction::FSub:
4032 case Instruction::Mul:
4033 case Instruction::FMul:
4034 case Instruction::UDiv:
4035 case Instruction::SDiv:
4036 case Instruction::FDiv:
4037 case Instruction::URem:
4038 case Instruction::SRem:
4039 case Instruction::FRem:
4040 case Instruction::Shl:
4041 case Instruction::LShr:
4042 case Instruction::AShr:
4043 case Instruction::And:
4044 case Instruction::Or:
4045 case Instruction::Xor: {
4046 // Certain instructions can be cheaper to vectorize if they have a
4047 // constant second vector operand.
4048 TargetTransformInfo::OperandValueKind Op1VK =
4049 TargetTransformInfo::OK_AnyValue;
4050 TargetTransformInfo::OperandValueKind Op2VK =
4051 TargetTransformInfo::OK_UniformConstantValue;
4052 TargetTransformInfo::OperandValueProperties Op1VP =
4053 TargetTransformInfo::OP_None;
4054 TargetTransformInfo::OperandValueProperties Op2VP =
4055 TargetTransformInfo::OP_PowerOf2;
4056
4057 // If all operands are exactly the same ConstantInt then set the
4058 // operand kind to OK_UniformConstantValue.
4059 // If instead not all operands are constants, then set the operand kind
4060 // to OK_AnyValue. If all operands are constants but not the same,
4061 // then set the operand kind to OK_NonUniformConstantValue.
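    // For example, a shift whose second operand is the same power-of-2
    // constant in every lane (OK_UniformConstantValue + OP_PowerOf2) is
    // typically cheaper for the target than a variable per-lane shift.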
4062 ConstantInt *CInt0 = nullptr;
4063 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
4064 const Instruction *I = cast<Instruction>(VL[i]);
4065 unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
4066 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
4067 if (!CInt) {
4068 Op2VK = TargetTransformInfo::OK_AnyValue;
4069 Op2VP = TargetTransformInfo::OP_None;
4070 break;
4071 }
4072 if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
4073 !CInt->getValue().isPowerOf2())
4074 Op2VP = TargetTransformInfo::OP_None;
4075 if (i == 0) {
4076 CInt0 = CInt;
4077 continue;
4078 }
4079 if (CInt0 != CInt)
4080 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
4081 }
4082
4083 SmallVector<const Value *, 4> Operands(VL0->operand_values());
4084 InstructionCost ScalarEltCost =
4085 TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK,
4086 Op2VK, Op1VP, Op2VP, Operands, VL0);
4087 if (NeedToShuffleReuses) {
4088 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4089 }
4090 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
4091 InstructionCost VecCost =
4092 TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK,
4093 Op2VK, Op1VP, Op2VP, Operands, VL0);
4094 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
4095 return CommonCost + VecCost - ScalarCost;
4096 }
4097 case Instruction::GetElementPtr: {
4098 TargetTransformInfo::OperandValueKind Op1VK =
4099 TargetTransformInfo::OK_AnyValue;
4100 TargetTransformInfo::OperandValueKind Op2VK =
4101 TargetTransformInfo::OK_UniformConstantValue;
4102
4103 InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost(
4104 Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK);
4105 if (NeedToShuffleReuses) {
4106 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4107 }
4108 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
4109 InstructionCost VecCost = TTI->getArithmeticInstrCost(
4110 Instruction::Add, VecTy, CostKind, Op1VK, Op2VK);
4111 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
4112 return CommonCost + VecCost - ScalarCost;
4113 }
4114 case Instruction::Load: {
4115 // Cost of wide load - cost of scalar loads.
4116 Align Alignment = cast<LoadInst>(VL0)->getAlign();
4117 InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
4118 Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0);
4119 if (NeedToShuffleReuses) {
4120 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4121 }
4122 InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
4123 InstructionCost VecLdCost;
4124 if (E->State == TreeEntry::Vectorize) {
4125 VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
4126 CostKind, VL0);
4127 } else {
4128 assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
4129 Align CommonAlignment = Alignment;
4130 for (Value *V : VL)
4131 CommonAlignment =
4132 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
4133 VecLdCost = TTI->getGatherScatterOpCost(
4134 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
4135 /*VariableMask=*/false, CommonAlignment, CostKind, VL0);
4136 }
4137 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost));
4138 return CommonCost + VecLdCost - ScalarLdCost;
4139 }
4140 case Instruction::Store: {
4141 // We know that we can merge the stores. Calculate the cost.
4142 bool IsReorder = !E->ReorderIndices.empty();
4143 auto *SI =
4144 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
4145 Align Alignment = SI->getAlign();
4146 InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
4147 Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
4148 InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
4149 InstructionCost VecStCost = TTI->getMemoryOpCost(
4150 Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
4151 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
4152 return CommonCost + VecStCost - ScalarStCost;
4153 }
4154 case Instruction::Call: {
4155 CallInst *CI = cast<CallInst>(VL0);
4156 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4157
4158 // Calculate the cost of the scalar and vector calls.
4159 IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
4160 InstructionCost ScalarEltCost =
4161 TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
4162 if (NeedToShuffleReuses) {
4163 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4164 }
4165 InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
4166
4167 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
4168 InstructionCost VecCallCost =
4169 std::min(VecCallCosts.first, VecCallCosts.second);
4170
4171 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
4172 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
4173 << " for " << *CI << "\n");
4174
4175 return CommonCost + VecCallCost - ScalarCallCost;
4176 }
4177 case Instruction::ShuffleVector: {
4178 assert(E->isAltShuffle() &&
4179 ((Instruction::isBinaryOp(E->getOpcode()) &&
4180 Instruction::isBinaryOp(E->getAltOpcode())) ||
4181 (Instruction::isCast(E->getOpcode()) &&
4182 Instruction::isCast(E->getAltOpcode()))) &&
4183 "Invalid Shuffle Vector Operand");
4184 InstructionCost ScalarCost = 0;
4185 if (NeedToShuffleReuses) {
4186 for (unsigned Idx : E->ReuseShuffleIndices) {
4187 Instruction *I = cast<Instruction>(VL[Idx]);
4188 CommonCost -= TTI->getInstructionCost(I, CostKind);
4189 }
4190 for (Value *V : VL) {
4191 Instruction *I = cast<Instruction>(V);
4192 CommonCost += TTI->getInstructionCost(I, CostKind);
4193 }
4194 }
4195 for (Value *V : VL) {
4196 Instruction *I = cast<Instruction>(V);
4197 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
4198 ScalarCost += TTI->getInstructionCost(I, CostKind);
4199 }
4200       // VecCost is equal to the sum of the cost of creating the two vectors
4201       // and the cost of creating the shuffle.
4202 InstructionCost VecCost = 0;
4203 if (Instruction::isBinaryOp(E->getOpcode())) {
4204 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
4205 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
4206 CostKind);
4207 } else {
4208 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
4209 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
4210 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
4211 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
4212 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
4213 TTI::CastContextHint::None, CostKind);
4214 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
4215 TTI::CastContextHint::None, CostKind);
4216 }
4217
4218 SmallVector<int> Mask(E->Scalars.size());
4219 for (unsigned I = 0, End = E->Scalars.size(); I < End; ++I) {
4220 auto *OpInst = cast<Instruction>(E->Scalars[I]);
4221 assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
4222 Mask[I] = I + (OpInst->getOpcode() == E->getAltOpcode() ? End : 0);
4223 }
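      // For example, with VL = {add, sub, add, sub} the mask is {0, 5, 2, 7},
      // taking even lanes from the main-opcode vector and odd lanes from the
      // alternate-opcode vector.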
4224 VecCost +=
4225 TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, Mask, 0);
4226 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
4227 return CommonCost + VecCost - ScalarCost;
4228 }
4229 default:
4230 llvm_unreachable("Unknown instruction");
4231 }
4232 }
4233
4234 bool BoUpSLP::isFullyVectorizableTinyTree() const {
4235 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
4236                     << VectorizableTree.size() << " is fully vectorizable.\n");
4237
4238 // We only handle trees of heights 1 and 2.
4239 if (VectorizableTree.size() == 1 &&
4240 VectorizableTree[0]->State == TreeEntry::Vectorize)
4241 return true;
4242
4243 if (VectorizableTree.size() != 2)
4244 return false;
4245
4246 // Handle splat and all-constants stores. Also try to vectorize tiny trees
4247 // whose second node is a gather that either has fewer scalar operands than
4248 // the initial tree entry (it may be profitable to shuffle the second
4249 // gather) or consists of extractelements that form a shuffle.
4250 SmallVector<int> Mask;
4251 if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
4252 (allConstant(VectorizableTree[1]->Scalars) ||
4253 isSplat(VectorizableTree[1]->Scalars) ||
4254 (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
4255 VectorizableTree[1]->Scalars.size() <
4256 VectorizableTree[0]->Scalars.size()) ||
4257 (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
4258 VectorizableTree[1]->getOpcode() == Instruction::ExtractElement &&
4259 isShuffle(VectorizableTree[1]->Scalars, Mask))))
4260 return true;
4261
4262 // Gathering cost would be too much for tiny trees.
4263 if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
4264 VectorizableTree[1]->State == TreeEntry::NeedToGather)
4265 return false;
4266
4267 return true;
4268 }
4269
4270 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
4271 TargetTransformInfo *TTI,
4272 bool MustMatchOrInst) {
4273 // Look past the root to find a source value. Arbitrarily follow the
4274 // path through operand 0 of any 'or'. Also, peek through optional
4275 // shift-left-by-multiple-of-8-bits.
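// An illustrative IR sketch of the pattern being matched (hypothetical
// values, not from this function):
//   %l0 = load i8, i8* %p0
//   %l1 = load i8, i8* %p1
//   %z0 = zext i8 %l0 to i16
//   %z1 = zext i8 %l1 to i16
//   %s1 = shl i16 %z1, 8
//   %o  = or i16 %s1, %z0
// Starting from %o, the loop below follows operand 0 to %s1, peeks through
// the byte-aligned shift to %z1, and stops at the zext-of-load.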
4276 Value *ZextLoad = Root;
4277 const APInt *ShAmtC;
4278 bool FoundOr = false;
4279 while (!isa<ConstantExpr>(ZextLoad) &&
4280 (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
4281 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
4282 ShAmtC->urem(8) == 0))) {
4283 auto *BinOp = cast<BinaryOperator>(ZextLoad);
4284 ZextLoad = BinOp->getOperand(0);
4285 if (BinOp->getOpcode() == Instruction::Or)
4286 FoundOr = true;
4287 }
4288 // Check if the input is an extended load of the required or/shift expression.
4289 Value *LoadPtr;
4290 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
4291 !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr)))))
4292 return false;
4293
4294 // Require that the total load bit width is a legal integer type.
4295 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
4296 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
4297 Type *SrcTy = LoadPtr->getType()->getPointerElementType();
4298 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
4299 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
4300 return false;
4301
4302 // Everything matched - assume that we can fold the whole sequence using
4303 // load combining.
4304 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
4305 << *(cast<Instruction>(Root)) << "\n");
4306
4307 return true;
4308 }
4309
4310 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
4311 if (RdxKind != RecurKind::Or)
4312 return false;
4313
4314 unsigned NumElts = VectorizableTree[0]->Scalars.size();
4315 Value *FirstReduced = VectorizableTree[0]->Scalars[0];
4316 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
4317 /* MatchOr */ false);
4318 }
4319
4320 bool BoUpSLP::isLoadCombineCandidate() const {
4321 // Peek through a final sequence of stores and check if all operations are
4322 // likely to be load-combined.
4323 unsigned NumElts = VectorizableTree[0]->Scalars.size();
4324 for (Value *Scalar : VectorizableTree[0]->Scalars) {
4325 Value *X;
4326 if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
4327 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
4328 return false;
4329 }
4330 return true;
4331 }
4332
4333 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
4334 // No need to vectorize inserts of gathered values.
4335 if (VectorizableTree.size() == 2 &&
4336 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
4337 VectorizableTree[1]->State == TreeEntry::NeedToGather)
4338 return true;
4339
4340 // We can vectorize the tree if its size is greater than or equal to the
4341 // minimum size specified by the MinTreeSize command line option.
4342 if (VectorizableTree.size() >= MinTreeSize)
4343 return false;
4344
4345 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
4346 // can vectorize it if we can prove it fully vectorizable.
4347 if (isFullyVectorizableTinyTree())
4348 return false;
4349
4350 assert((!VectorizableTree.empty() || ExternalUses.empty()) &&
4351 "We shouldn't have any external users");
4353
4354 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
4355 // vectorizable.
4356 return true;
4357 }
4358
4359 InstructionCost BoUpSLP::getSpillCost() const {
4360 // Walk from the bottom of the tree to the top, tracking which values are
4361 // live. When we see a call instruction that is not part of our tree,
4362 // query TTI to see if there is a cost to keeping values live over it
4363 // (for example, if spills and fills are required).
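// For example (illustrative): if two tree scalars are defined before an
// unrelated call and only used after it, they are live across the call; on
// targets where calls clobber the relevant registers, each such call adds
// the spill/fill cost reported by getCostOfKeepingLiveOverCall.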
4364 unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
4365 InstructionCost Cost = 0;
4366
4367 SmallPtrSet<Instruction*, 4> LiveValues;
4368 Instruction *PrevInst = nullptr;
4369
4370 // The entries in VectorizableTree are not necessarily ordered by their
4371 // position in basic blocks. Collect them and order them by dominance so later
4372 // instructions are guaranteed to be visited first. For instructions in
4373 // different basic blocks, we only scan to the beginning of the block, so
4374 // their order does not matter, as long as all instructions in a basic block
4375 // are grouped together. Using dominance ensures a deterministic order.
4376 SmallVector<Instruction *, 16> OrderedScalars;
4377 for (const auto &TEPtr : VectorizableTree) {
4378 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
4379 if (!Inst)
4380 continue;
4381 OrderedScalars.push_back(Inst);
4382 }
4383 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
4384 auto *NodeA = DT->getNode(A->getParent());
4385 auto *NodeB = DT->getNode(B->getParent());
4386 assert(NodeA && "Should only process reachable instructions");
4387 assert(NodeB && "Should only process reachable instructions");
4388 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
4389 "Different nodes should have different DFS numbers");
4390 if (NodeA != NodeB)
4391 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
4392 return B->comesBefore(A);
4393 });
4394
4395 for (Instruction *Inst : OrderedScalars) {
4396 if (!PrevInst) {
4397 PrevInst = Inst;
4398 continue;
4399 }
4400
4401 // Update LiveValues.
4402 LiveValues.erase(PrevInst);
4403 for (auto &J : PrevInst->operands()) {
4404 if (isa<Instruction>(&*J) && getTreeEntry(&*J))
4405 LiveValues.insert(cast<Instruction>(&*J));
4406 }
4407
4408 LLVM_DEBUG({
4409 dbgs() << "SLP: #LV: " << LiveValues.size();
4410 for (auto *X : LiveValues)
4411 dbgs() << " " << X->getName();
4412 dbgs() << ", Looking at ";
4413 Inst->dump();
4414 });
4415
4416 // Now find the sequence of instructions between PrevInst and Inst.
4417 unsigned NumCalls = 0;
4418 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
4419 PrevInstIt =
4420 PrevInst->getIterator().getReverse();
4421 while (InstIt != PrevInstIt) {
4422 if (PrevInstIt == PrevInst->getParent()->rend()) {
4423 PrevInstIt = Inst->getParent()->rbegin();
4424 continue;
4425 }
4426
4427 // Debug information does not impact spill cost.
4428 if ((isa<CallInst>(&*PrevInstIt) &&
4429 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
4430 &*PrevInstIt != PrevInst)
4431 NumCalls++;
4432
4433 ++PrevInstIt;
4434 }
4435
4436 if (NumCalls) {
4437 SmallVector<Type*, 4> V;
4438 for (auto *II : LiveValues) {
4439 auto *ScalarTy = II->getType();
4440 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
4441 ScalarTy = VectorTy->getElementType();
4442 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
4443 }
4444 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
4445 }
4446
4447 PrevInst = Inst;
4448 }
4449
4450 return Cost;
4451 }
4452
4453 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
4454 InstructionCost Cost = 0;
4455 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
4456 << VectorizableTree.size() << ".\n");
4457
4458 unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
4459
4460 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
4461 TreeEntry &TE = *VectorizableTree[I].get();
4462
4463 InstructionCost C = getEntryCost(&TE, VectorizedVals);
4464 Cost += C;
4465 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
4466 << " for bundle that starts with " << *TE.Scalars[0]
4467 << ".\n"
4468 << "SLP: Current total cost = " << Cost << "\n");
4469 }
4470
4471 SmallPtrSet<Value *, 16> ExtractCostCalculated;
4472 InstructionCost ExtractCost = 0;
4473 SmallVector<unsigned> VF;
4474 SmallVector<SmallVector<int>> ShuffleMask;
4475 SmallVector<Value *> FirstUsers;
4476 SmallVector<APInt> DemandedElts;
4477 for (ExternalUser &EU : ExternalUses) {
4478 // We only add extract cost once for the same scalar.
4479 if (!ExtractCostCalculated.insert(EU.Scalar).second)
4480 continue;
4481
4482 // Uses by ephemeral values are free (because the ephemeral value will be
4483 // removed prior to code generation, and so the extraction will be
4484 // removed as well).
4485 if (EphValues.count(EU.User))
4486 continue;
4487
4488 // No extract cost for vector "scalar"
4489 if (isa<FixedVectorType>(EU.Scalar->getType()))
4490 continue;
4491
4492 // The cost for external uses was already counted when we tried to adjust
4493 // the cost for extractelements; no need to add it again.
4494 if (isa<ExtractElementInst>(EU.Scalar))
4495 continue;
4496
4497 // If the found user is an insertelement, do not calculate the extract cost
4498 // but try to detect it as a final shuffled/identity match.
4499 if (EU.User && isa<InsertElementInst>(EU.User)) {
4500 if (auto *FTy = dyn_cast<FixedVectorType>(EU.User->getType())) {
4501 Optional<int> InsertIdx = getInsertIndex(EU.User, 0);
4502 if (!InsertIdx || *InsertIdx == UndefMaskElem)
4503 continue;
4504 Value *VU = EU.User;
4505 auto *It = find_if(FirstUsers, [VU](Value *V) {
4506 // Checks if 2 insertelements are from the same buildvector.
4507 if (VU->getType() != V->getType())
4508 return false;
4509 auto *IE1 = cast<InsertElementInst>(VU);
4510 auto *IE2 = cast<InsertElementInst>(V);
4511 // Walk the chains of insertelement instructions, trying to find either VU
4512 // as the original vector for IE2 or V as the original vector for IE1.
4513 do {
4514 if (IE1 == VU || IE2 == V)
4515 return true;
4516 if (IE1)
4517 IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0));
4518 if (IE2)
4519 IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0));
4520 } while (IE1 || IE2);
4521 return false;
4522 });
4523 int VecId = -1;
4524 if (It == FirstUsers.end()) {
4525 VF.push_back(FTy->getNumElements());
4526 ShuffleMask.emplace_back(VF.back(), UndefMaskElem);
4527 FirstUsers.push_back(EU.User);
4528 DemandedElts.push_back(APInt::getNullValue(VF.back()));
4529 VecId = FirstUsers.size() - 1;
4530 } else {
4531 VecId = std::distance(FirstUsers.begin(), It);
4532 }
4533 int Idx = *InsertIdx;
4534 ShuffleMask[VecId][Idx] = EU.Lane;
4535 DemandedElts[VecId].setBit(Idx);
4536 }
4537 }
4538
4539 // If we plan to rewrite the tree in a smaller type, we will need to sign
4540 // extend the extracted value back to the original type. Here, we account
4541 // for the extract and the added cost of the sign extend if needed.
4542 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
4543 auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
4544 if (MinBWs.count(ScalarRoot)) {
4545 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
4546 auto Extend =
4547 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
4548 VecTy = FixedVectorType::get(MinTy, BundleWidth);
4549 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
4550 VecTy, EU.Lane);
4551 } else {
4552 ExtractCost +=
4553 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
4554 }
4555 }
4556
4557 InstructionCost SpillCost = getSpillCost();
4558 Cost += SpillCost + ExtractCost;
4559 for (int I = 0, E = FirstUsers.size(); I < E; ++I) {
4560 // For the very first element - a simple shuffle of the source vector.
4561 int Limit = ShuffleMask[I].size() * 2;
4562 if (I == 0 &&
4563 all_of(ShuffleMask[I], [Limit](int Idx) { return Idx < Limit; }) &&
4564 !ShuffleVectorInst::isIdentityMask(ShuffleMask[I])) {
4565 InstructionCost C = TTI->getShuffleCost(
4566 TTI::SK_PermuteSingleSrc,
4567 cast<FixedVectorType>(FirstUsers[I]->getType()), ShuffleMask[I]);
4568 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
4569 << " for final shuffle of insertelement external users "
4570 << *VectorizableTree.front()->Scalars.front() << ".\n"
4571 << "SLP: Current total cost = " << Cost << "\n");
4572 Cost += C;
4573 continue;
4574 }
4575 // Other elements - a permutation of 2 vectors (the initial one and the
4576 // Ith incoming vector).
4577 unsigned VF = ShuffleMask[I].size();
4578 for (unsigned Idx = 0; Idx < VF; ++Idx) {
4579 int &Mask = ShuffleMask[I][Idx];
4580 Mask = Mask == UndefMaskElem ? Idx : VF + Mask;
4581 }
4582 InstructionCost C = TTI->getShuffleCost(
4583 TTI::SK_PermuteTwoSrc, cast<FixedVectorType>(FirstUsers[I]->getType()),
4584 ShuffleMask[I]);
4585 LLVM_DEBUG(
4586 dbgs()
4587 << "SLP: Adding cost " << C
4588 << " for final shuffle of vector node and external insertelement users "
4589 << *VectorizableTree.front()->Scalars.front() << ".\n"
4590 << "SLP: Current total cost = " << Cost << "\n");
4591 Cost += C;
4592 InstructionCost InsertCost = TTI->getScalarizationOverhead(
4593 cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I],
4594 /*Insert*/ true,
4595 /*Extract*/ false);
4596 Cost -= InsertCost;
4597 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost
4598 << " for insertelements gather.\n"
4599 << "SLP: Current total cost = " << Cost << "\n");
4600 }
4601
4602 #ifndef NDEBUG
4603 SmallString<256> Str;
4604 {
4605 raw_svector_ostream OS(Str);
4606 OS << "SLP: Spill Cost = " << SpillCost << ".\n"
4607 << "SLP: Extract Cost = " << ExtractCost << ".\n"
4608 << "SLP: Total Cost = " << Cost << ".\n";
4609 }
4610 LLVM_DEBUG(dbgs() << Str);
4611 if (ViewSLPTree)
4612 ViewGraph(this, "SLP" + F->getName(), false, Str);
4613 #endif
4614
4615 return Cost;
4616 }
4617
4618 Optional<TargetTransformInfo::ShuffleKind>
4619 BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
4620 SmallVectorImpl<const TreeEntry *> &Entries) {
4621 // TODO: currently checking only for Scalars in the tree entry, need to count
4622 // reused elements too for better cost estimation.
4623 Mask.assign(TE->Scalars.size(), UndefMaskElem);
4624 Entries.clear();
4625 // Build a map from each value to the gather tree entries that contain it.
4626 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
4627 for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
4628 if (EntryPtr.get() == TE)
4629 break;
4630 if (EntryPtr->State != TreeEntry::NeedToGather)
4631 continue;
4632 for (Value *V : EntryPtr->Scalars)
4633 ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
4634 }
4635 // Find all tree entries used by the gathered values. If no common entries
4636 // are found, this is not a shuffle.
4637 // Here we build a set of tree nodes for each gathered value and try to
4638 // find the intersection between these sets. If we have at least one common
4639 // tree node for every gathered value, we have just a permutation of a
4640 // single vector. If we end up with 2 different sets, we have a permutation
4641 // of 2 input vectors.
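// For example (illustrative): when gathering {%a, %b, %c, %d} where %a and
// %c come from tree entry TE1 while %b and %d come from TE2, we end up with
// the two sets {TE1} and {TE2}, i.e. a permutation of 2 input vectors; if
// all four values came from TE1, the single common set would describe a
// one-source permutation.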
4642 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
4643 DenseMap<Value *, int> UsedValuesEntry;
4644 for (Value *V : TE->Scalars) {
4645 if (isa<UndefValue>(V))
4646 continue;
4647 // Build a list of tree entries where V is used.
4648 SmallPtrSet<const TreeEntry *, 4> VToTEs;
4649 auto It = ValueToTEs.find(V);
4650 if (It != ValueToTEs.end())
4651 VToTEs = It->second;
4652 if (const TreeEntry *VTE = getTreeEntry(V))
4653 VToTEs.insert(VTE);
4654 if (VToTEs.empty())
4655 return None;
4656 if (UsedTEs.empty()) {
4657 // On the first iteration, just insert the list of nodes into the vector.
4658 UsedTEs.push_back(VToTEs);
4659 } else {
4660 // Need to check if there are any previously used tree nodes which use V.
4661 // If there are no such nodes, consider that we have another input
4662 // vector.
4663 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
4664 unsigned Idx = 0;
4665 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
4666 // Do we have a non-empty intersection of previously listed tree entries
4667 // and tree entries using current V?
4668 set_intersect(VToTEs, Set);
4669 if (!VToTEs.empty()) {
4670 // Yes, write the new subset and continue analysis for the next
4671 // scalar.
4672 Set.swap(VToTEs);
4673 break;
4674 }
4675 VToTEs = SavedVToTEs;
4676 ++Idx;
4677 }
4678 // No non-empty intersection found - need to add a second set of possible
4679 // source vectors.
4680 if (Idx == UsedTEs.size()) {
4681 // If the number of input vectors would exceed 2, this is not a
4682 // permutation; fall back to the regular gather.
4683 if (UsedTEs.size() == 2)
4684 return None;
4685 UsedTEs.push_back(SavedVToTEs);
4686 Idx = UsedTEs.size() - 1;
4687 }
4688 UsedValuesEntry.try_emplace(V, Idx);
4689 }
4690 }
4691
4692 unsigned VF = 0;
4693 if (UsedTEs.size() == 1) {
4694 // First, try to find a perfect match in another gather node.
4695 auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) {
4696 return EntryPtr->isSame(TE->Scalars);
4697 });
4698 if (It != UsedTEs.front().end()) {
4699 Entries.push_back(*It);
4700 std::iota(Mask.begin(), Mask.end(), 0);
4701 return TargetTransformInfo::SK_PermuteSingleSrc;
4702 }
4703 // No perfect match, just a shuffle, so choose the first tree node.
4704 Entries.push_back(*UsedTEs.front().begin());
4705 } else {
4706 // Try to find nodes with the same vector factor.
4707 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries.");
4708 // FIXME: Shall be replaced by GetVF function once non-power-2 patch is
4709 // landed.
4710 auto &&GetVF = [](const TreeEntry *TE) {
4711 if (!TE->ReuseShuffleIndices.empty())
4712 return TE->ReuseShuffleIndices.size();
4713 return TE->Scalars.size();
4714 };
4715 DenseMap<int, const TreeEntry *> VFToTE;
4716 for (const TreeEntry *TE : UsedTEs.front())
4717 VFToTE.try_emplace(GetVF(TE), TE);
4718 for (const TreeEntry *TE : UsedTEs.back()) {
4719 auto It = VFToTE.find(GetVF(TE));
4720 if (It != VFToTE.end()) {
4721 VF = It->first;
4722 Entries.push_back(It->second);
4723 Entries.push_back(TE);
4724 break;
4725 }
4726 }
4727 // No 2 source vectors with the same vector factor - give up and do a
4728 // regular gather.
4729 if (Entries.empty())
4730 return None;
4731 }
4732
4733 // Build a shuffle mask for better cost estimation and vector emission.
4734 for (int I = 0, E = TE->Scalars.size(); I < E; ++I) {
4735 Value *V = TE->Scalars[I];
4736 if (isa<UndefValue>(V))
4737 continue;
4738 unsigned Idx = UsedValuesEntry.lookup(V);
4739 const TreeEntry *VTE = Entries[Idx];
4740 int FoundLane = VTE->findLaneForValue(V);
4741 Mask[I] = Idx * VF + FoundLane;
4742 // Extra check required by isSingleSourceMaskImpl function (called by
4743 // ShuffleVectorInst::isSingleSourceMask).
4744 if (Mask[I] >= 2 * E)
4745 return None;
4746 }
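// For example (illustrative): with two entries of vector factor 4, a value
// found at lane 2 of Entries[1] gets Mask[I] = 1 * 4 + 2 == 6, i.e. it is
// taken from the second source of the two-source shuffle.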
4747 switch (Entries.size()) {
4748 case 1:
4749 return TargetTransformInfo::SK_PermuteSingleSrc;
4750 case 2:
4751 return TargetTransformInfo::SK_PermuteTwoSrc;
4752 default:
4753 break;
4754 }
4755 return None;
4756 }
4757
4758 InstructionCost
4759 BoUpSLP::getGatherCost(FixedVectorType *Ty,
4760 const DenseSet<unsigned> &ShuffledIndices) const {
4761 unsigned NumElts = Ty->getNumElements();
4762 APInt DemandedElts = APInt::getNullValue(NumElts);
4763 for (unsigned I = 0; I < NumElts; ++I)
4764 if (!ShuffledIndices.count(I))
4765 DemandedElts.setBit(I);
4766 InstructionCost Cost =
4767 TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true,
4768 /*Extract*/ false);
4769 if (!ShuffledIndices.empty())
4770 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
4771 return Cost;
4772 }
4773
4774 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
4775 // Find the type of the operands in VL.
4776 Type *ScalarTy = VL[0]->getType();
4777 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
4778 ScalarTy = SI->getValueOperand()->getType();
4779 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
4780 // Find the cost of inserting/extracting values from the vector.
4781 // Check if the same elements are inserted several times and count them as
4782 // shuffle candidates.
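// For example (illustrative): for VL = {%x, %y, %x, 7} the constant 7 is
// skipped, the rearmost %x and %y are counted as unique inserts, and the
// duplicate %x at index 0 is recorded as a shuffled element that the final
// single-source shuffle can produce.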
4783 DenseSet<unsigned> ShuffledElements;
4784 DenseSet<Value *> UniqueElements;
4785 // Iterate in reverse order to consider the insertelements with the higher
4786 // cost first.
4786 for (unsigned I = VL.size(); I > 0; --I) {
4787 unsigned Idx = I - 1;
4788 if (isConstant(VL[Idx]))
4789 continue;
4790 if (!UniqueElements.insert(VL[Idx]).second)
4791 ShuffledElements.insert(Idx);
4792 }
4793 return getGatherCost(VecTy, ShuffledElements);
4794 }
4795
4796 // Perform operand reordering on the instructions in VL and return the reordered
4797 // operands in Left and Right.
4798 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
4799 SmallVectorImpl<Value *> &Left,
4800 SmallVectorImpl<Value *> &Right,
4801 const DataLayout &DL,
4802 ScalarEvolution &SE,
4803 const BoUpSLP &R) {
4804 if (VL.empty())
4805 return;
4806 VLOperands Ops(VL, DL, SE, R);
4807 // Reorder the operands in place.
4808 Ops.reorder();
4809 Left = Ops.getVL(0);
4810 Right = Ops.getVL(1);
4811 }
4812
4813 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
4814 // Get the basic block this bundle is in. All instructions in the bundle
4815 // should be in this block.
4816 auto *Front = E->getMainOp();
4817 auto *BB = Front->getParent();
4818 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
4819 auto *I = cast<Instruction>(V);
4820 return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
4821 }));
4822
4823 // The last instruction in the bundle in program order.
4824 Instruction *LastInst = nullptr;
4825
4826 // Find the last instruction. The common case should be that BB has been
4827 // scheduled, and the last instruction is VL.back(). So we start with
4828 // VL.back() and iterate over schedule data until we reach the end of the
4829 // bundle. The end of the bundle is marked by null ScheduleData.
4830 if (BlocksSchedules.count(BB)) {
4831 auto *Bundle =
4832 BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
4833 if (Bundle && Bundle->isPartOfBundle())
4834 for (; Bundle; Bundle = Bundle->NextInBundle)
4835 if (Bundle->OpValue == Bundle->Inst)
4836 LastInst = Bundle->Inst;
4837 }
4838
4839 // LastInst can still be null at this point if there's either not an entry
4840 // for BB in BlocksSchedules or there's no ScheduleData available for
4841 // VL.back(). This can be the case if buildTree_rec aborts for various
4842 // reasons (e.g., the maximum recursion depth is reached, the maximum region
4843 // size is reached, etc.). ScheduleData is initialized in the scheduling
4844 // "dry-run".
4845 //
4846 // If this happens, we can still find the last instruction by brute force. We
4847 // iterate forwards from Front (inclusive) until we either see all
4848 // instructions in the bundle or reach the end of the block. If Front is the
4849 // last instruction in program order, LastInst will be set to Front, and we
4850 // will visit all the remaining instructions in the block.
4851 //
4852 // One of the reasons we exit early from buildTree_rec is to place an upper
4853 // bound on compile-time. Thus, taking an additional compile-time hit here is
4854 // not ideal. However, this should be exceedingly rare since it requires that
4855 // we both exit early from buildTree_rec and that the bundle be out-of-order
4856 // (causing us to iterate all the way to the end of the block).
4857 if (!LastInst) {
4858 SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
4859 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
4860 if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
4861 LastInst = &I;
4862 if (Bundle.empty())
4863 break;
4864 }
4865 }
4866 assert(LastInst && "Failed to find last instruction in bundle");
4867
4868 // Set the insertion point after the last instruction in the bundle. Set the
4869 // debug location to Front.
4870 Builder.SetInsertPoint(BB, ++LastInst->getIterator());
4871 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
4872 }
4873
4874 Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
4875 // List of instructions/lanes from the current block and/or the blocks which
4876 // are part of the current loop. These instructions will be inserted at the
4877 // end, to make it possible to optimize loops and hoist invariant
4878 // instructions out of the loop body with better chances for success.
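// For example (illustrative): when gathering inside a loop body, the insert
// of a loop-defined value is emitted after the constant and loop-invariant
// inserts, so the invariant prefix of the build vector remains hoistable.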
4879 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
4880 SmallSet<int, 4> PostponedIndices;
4881 Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
4882 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
4883 SmallPtrSet<BasicBlock *, 4> Visited;
4884 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
4885 InsertBB = InsertBB->getSinglePredecessor();
4886 return InsertBB && InsertBB == InstBB;
4887 };
4888 for (int I = 0, E = VL.size(); I < E; ++I) {
4889 if (auto *Inst = dyn_cast<Instruction>(VL[I]))
4890 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
4891 getTreeEntry(Inst) || (L && (L->contains(Inst)))) &&
4892 PostponedIndices.insert(I).second)
4893 PostponedInsts.emplace_back(Inst, I);
4894 }
4895
4896 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) {
4897 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos));
4898 auto *InsElt = dyn_cast<InsertElementInst>(Vec);
4899 if (!InsElt)
4900 return Vec;
4901 GatherSeq.insert(InsElt);
4902 CSEBlocks.insert(InsElt->getParent());
4903 // Add to our 'need-to-extract' list.
4904 if (TreeEntry *Entry = getTreeEntry(V)) {
4905 // Find which lane we need to extract.
4906 unsigned FoundLane = Entry->findLaneForValue(V);
4907 ExternalUses.emplace_back(V, InsElt, FoundLane);
4908 }
4909 return Vec;
4910 };
4911 Value *Val0 =
4912 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
4913 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
4914 Value *Vec = PoisonValue::get(VecTy);
4915 SmallVector<int> NonConsts;
4916 // Insert the constant values first.
4917 for (int I = 0, E = VL.size(); I < E; ++I) {
4918 if (PostponedIndices.contains(I))
4919 continue;
4920 if (!isConstant(VL[I])) {
4921 NonConsts.push_back(I);
4922 continue;
4923 }
4924 Vec = CreateInsertElement(Vec, VL[I], I);
4925 }
4926 // Insert non-constant values.
4927 for (int I : NonConsts)
4928 Vec = CreateInsertElement(Vec, VL[I], I);
4929 // Append the instructions which are, or may be, part of the loop at the end
4930 // to make it possible to hoist the non-loop-based instructions.
4931 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
4932 Vec = CreateInsertElement(Vec, Pair.first, Pair.second);
4933
4934 return Vec;
4935 }
4936
4937 namespace {
4938 /// Merges shuffle masks and emits final shuffle instruction, if required.
4939 class ShuffleInstructionBuilder {
4940 IRBuilderBase &Builder;
4941 const unsigned VF = 0;
4942 bool IsFinalized = false;
4943 SmallVector<int, 4> Mask;
4944
4945 public:
4946 ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF)
4947 : Builder(Builder), VF(VF) {}
4948
4949 /// Adds a mask, inverting it before applying.
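/// For example (illustrative): SubMask {2, 0, 1} inverts to {1, 2, 0},
/// since inversePermutation builds NewMask such that
/// NewMask[SubMask[I]] == I.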
4950 void addInversedMask(ArrayRef<unsigned> SubMask) {
4951 if (SubMask.empty())
4952 return;
4953 SmallVector<int, 4> NewMask;
4954 inversePermutation(SubMask, NewMask);
4955 addMask(NewMask);
4956 }
4957
4958 /// Adds a mask, merging it into the single accumulated mask.
4959 void addMask(ArrayRef<unsigned> SubMask) {
4960 SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
4961 addMask(NewMask);
4962 }
4963
4964 void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); }
4965
4966 Value *finalize(Value *V) {
4967 IsFinalized = true;
4968 unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements();
4969 if (VF == ValueVF && Mask.empty())
4970 return V;
4971 SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem);
4972 std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0);
4973 addMask(NormalizedMask);
4974
4975 if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask))
4976 return V;
4977 return Builder.CreateShuffleVector(V, Mask, "shuffle");
4978 }
4979
4980 ~ShuffleInstructionBuilder() {
4981 assert((IsFinalized || Mask.empty()) &&
4982 "Shuffle construction must be finalized.");
4983 }
4984 };
4985 } // namespace
4986
4987 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
4988 unsigned VF = VL.size();
4989 InstructionsState S = getSameOpcode(VL);
4990 if (S.getOpcode()) {
4991 if (TreeEntry *E = getTreeEntry(S.OpValue))
4992 if (E->isSame(VL)) {
4993 Value *V = vectorizeTree(E);
4994 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
4995 if (!E->ReuseShuffleIndices.empty()) {
4996 // Reshuffle to get only unique values.
4997 // If some of the scalars are duplicated in the vectorization tree
4998 // entry, we do not vectorize them but instead generate a mask for
4999 // the reuses. But if there are several users of the same entry,
5000 // they may have different vectorization factors. This is especially
5001 // important for PHI nodes. In this case, we need to adapt the
5002 // resulting instruction for the user vectorization factor and have
5003 // to reshuffle it again to take only unique elements of the vector.
5004 // Without this code the function would incorrectly return a reduced
5005 // vector instruction with repeated elements instead of the unique ones.
5006
5007 // block:
5008 // %phi = phi <2 x ty> [ .., %entry ], [ %shuffle, %block ]
5009 // %2 = shuffle <2 x ty> %phi, poison, <4 x i32> <0, 0, 1, 1>
5010 // ... (use %2)
5011 // %shuffle = shuffle <4 x ty> %2, poison, <2 x i32> <0, 2>
5012 // br %block
5013 SmallVector<int> UniqueIdxs;
5014 SmallSet<int, 4> UsedIdxs;
5015 int Pos = 0;
5016 int Sz = VL.size();
5017 for (int Idx : E->ReuseShuffleIndices) {
5018 if (Idx != Sz && UsedIdxs.insert(Idx).second)
5019 UniqueIdxs.emplace_back(Pos);
5020 ++Pos;
5021 }
5022 assert(VF >= UsedIdxs.size() &&
5023 "Expected vectorization factor to cover all unique indices.");
5024 UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem);
5025 V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
5026 } else {
5027 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
5028 "Expected vectorization factor less "
5029 "than original vector size.");
5030 SmallVector<int> UniformMask(VF, 0);
5031 std::iota(UniformMask.begin(), UniformMask.end(), 0);
5032 V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle");
5033 }
5034 }
5035 return V;
5036 }
5037 }
5038
5039 // Check that every instruction appears once in this bundle.
5040 SmallVector<int> ReuseShuffleIndicies;
5041 SmallVector<Value *> UniqueValues;
5042 if (VL.size() > 2) {
5043 DenseMap<Value *, unsigned> UniquePositions;
5044 unsigned NumValues =
5045 std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) {
5046 return !isa<UndefValue>(V);
5047 }).base());
5048 VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues));
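// For example (illustrative): with 3 meaningful values, PowerOf2Ceil rounds
// the vectorization factor up to 4; the missing lane is padded with poison
// further below.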
5049 int UniqueVals = 0;
5050 bool HasUndefs = false;
5051 for (Value *V : VL.drop_back(VL.size() - VF)) {
5052 if (isa<UndefValue>(V)) {
5053 ReuseShuffleIndicies.emplace_back(UndefMaskElem);
5054 HasUndefs = true;
5055 continue;
5056 }
5057 if (isConstant(V)) {
5058 ReuseShuffleIndicies.emplace_back(UniqueValues.size());
5059 UniqueValues.emplace_back(V);
5060 continue;
5061 }
5062 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
5063 ReuseShuffleIndicies.emplace_back(Res.first->second);
5064 if (Res.second) {
5065 UniqueValues.emplace_back(V);
5066 ++UniqueVals;
5067 }
5068 }
5069 if (HasUndefs && UniqueVals == 1 && UniqueValues.size() == 1) {
5070 // Emit pure splat vector.
5071 // FIXME: why is it not identified as an identity?
5072 unsigned NumUndefs = count(ReuseShuffleIndicies, UndefMaskElem);
5073 if (NumUndefs == ReuseShuffleIndicies.size() - 1)
5074 ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(),
5075 UndefMaskElem);
5076 else
5077 ReuseShuffleIndicies.assign(VF, 0);
5078 } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) {
5079 ReuseShuffleIndicies.clear();
5080 UniqueValues.clear();
5081 UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues));
5082 }
5083 UniqueValues.append(VF - UniqueValues.size(),
5084 PoisonValue::get(VL[0]->getType()));
5085 VL = UniqueValues;
5086 }
5087
5088 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF);
5089 Value *Vec = gather(VL);
5090 if (!ReuseShuffleIndicies.empty()) {
5091 ShuffleBuilder.addMask(ReuseShuffleIndicies);
5092 Vec = ShuffleBuilder.finalize(Vec);
5093 if (auto *I = dyn_cast<Instruction>(Vec)) {
5094 GatherSeq.insert(I);
5095 CSEBlocks.insert(I->getParent());
5096 }
5097 }
5098 return Vec;
5099 }
5100
5101 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
5102 IRBuilder<>::InsertPointGuard Guard(Builder);
5103
5104 if (E->VectorizedValue) {
5105 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
5106 return E->VectorizedValue;
5107 }
5108
5109 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
5110 unsigned VF = E->Scalars.size();
5111 if (NeedToShuffleReuses)
5112 VF = E->ReuseShuffleIndices.size();
5113 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF);
5114 if (E->State == TreeEntry::NeedToGather) {
5115 setInsertPointAfterBundle(E);
5116 Value *Vec;
5117 SmallVector<int> Mask;
5118 SmallVector<const TreeEntry *> Entries;
5119 Optional<TargetTransformInfo::ShuffleKind> Shuffle =
5120 isGatherShuffledEntry(E, Mask, Entries);
5121 if (Shuffle.hasValue()) {
5122 assert((Entries.size() == 1 || Entries.size() == 2) &&
5123 "Expected shuffle of 1 or 2 entries.");
5124 Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
5125 Entries.back()->VectorizedValue, Mask);
5126 } else {
5127 Vec = gather(E->Scalars);
5128 }
5129 if (NeedToShuffleReuses) {
5130 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5131 Vec = ShuffleBuilder.finalize(Vec);
5132 if (auto *I = dyn_cast<Instruction>(Vec)) {
5133 GatherSeq.insert(I);
5134 CSEBlocks.insert(I->getParent());
5135 }
5136 }
5137 E->VectorizedValue = Vec;
5138 return Vec;
5139 }
5140
5141 assert((E->State == TreeEntry::Vectorize ||
5142 E->State == TreeEntry::ScatterVectorize) &&
5143 "Unhandled state");
5144 unsigned ShuffleOrOp =
5145 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
5146 Instruction *VL0 = E->getMainOp();
5147 Type *ScalarTy = VL0->getType();
5148 if (auto *Store = dyn_cast<StoreInst>(VL0))
5149 ScalarTy = Store->getValueOperand()->getType();
5150 else if (auto *IE = dyn_cast<InsertElementInst>(VL0))
5151 ScalarTy = IE->getOperand(1)->getType();
5152 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
5153 switch (ShuffleOrOp) {
5154 case Instruction::PHI: {
5155 auto *PH = cast<PHINode>(VL0);
5156 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
5157 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
5158 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
5159 Value *V = NewPhi;
5160 if (NeedToShuffleReuses)
5161 V = Builder.CreateShuffleVector(V, E->ReuseShuffleIndices, "shuffle");
5162
5163 E->VectorizedValue = V;
5164
5165 // PHINodes may have multiple entries from the same block. We want to
5166 // visit every block once.
5167 SmallPtrSet<BasicBlock*, 4> VisitedBBs;
5168
5169 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
5170 ValueList Operands;
5171 BasicBlock *IBB = PH->getIncomingBlock(i);
5172
5173 if (!VisitedBBs.insert(IBB).second) {
5174 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
5175 continue;
5176 }
5177
5178 Builder.SetInsertPoint(IBB->getTerminator());
5179 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
5180 Value *Vec = vectorizeTree(E->getOperand(i));
5181 NewPhi->addIncoming(Vec, IBB);
5182 }
5183
5184 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
5185 "Invalid number of incoming values");
5186 return V;
5187 }
5188
5189 case Instruction::ExtractElement: {
5190 Value *V = E->getSingleOperand(0);
5191 Builder.SetInsertPoint(VL0);
5192 ShuffleBuilder.addInversedMask(E->ReorderIndices);
5193 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5194 V = ShuffleBuilder.finalize(V);
5195 E->VectorizedValue = V;
5196 return V;
5197 }
5198 case Instruction::ExtractValue: {
5199 auto *LI = cast<LoadInst>(E->getSingleOperand(0));
5200 Builder.SetInsertPoint(LI);
5201 auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
5202 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
5203 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
5204 Value *NewV = propagateMetadata(V, E->Scalars);
5205 ShuffleBuilder.addInversedMask(E->ReorderIndices);
5206 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5207 NewV = ShuffleBuilder.finalize(NewV);
5208 E->VectorizedValue = NewV;
5209 return NewV;
5210 }
5211 case Instruction::InsertElement: {
5212 Builder.SetInsertPoint(VL0);
5213 Value *V = vectorizeTree(E->getOperand(1));
5214
5215 const unsigned NumElts =
5216 cast<FixedVectorType>(VL0->getType())->getNumElements();
5217 const unsigned NumScalars = E->Scalars.size();
5218
5219 // Create InsertVector shuffle if necessary
5220 Instruction *FirstInsert = nullptr;
5221 bool IsIdentity = true;
5222 unsigned Offset = UINT_MAX;
5223 for (unsigned I = 0; I < NumScalars; ++I) {
5224 Value *Scalar = E->Scalars[I];
5225 if (!FirstInsert &&
5226 !is_contained(E->Scalars, cast<Instruction>(Scalar)->getOperand(0)))
5227 FirstInsert = cast<Instruction>(Scalar);
5228 Optional<int> InsertIdx = getInsertIndex(Scalar, 0);
5229 if (!InsertIdx || *InsertIdx == UndefMaskElem)
5230 continue;
5231 unsigned Idx = *InsertIdx;
5232 if (Idx < Offset) {
5233 Offset = Idx;
5234 IsIdentity &= I == 0;
5235 } else {
5236 assert(Idx >= Offset && "Failed to find vector index offset");
5237 IsIdentity &= Idx - Offset == I;
5238 }
5239 }
5240 assert(Offset < NumElts && "Failed to find vector index offset");
5241
5242 // Create shuffle to resize vector
5243 SmallVector<int> Mask(NumElts, UndefMaskElem);
5244 if (!IsIdentity) {
5245 for (unsigned I = 0; I < NumScalars; ++I) {
5246 Value *Scalar = E->Scalars[I];
5247 Optional<int> InsertIdx = getInsertIndex(Scalar, 0);
5248 if (!InsertIdx || *InsertIdx == UndefMaskElem)
5249 continue;
5250 Mask[*InsertIdx - Offset] = I;
5251 }
5252 } else {
5253 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
5254 }
5255 if (!IsIdentity || NumElts != NumScalars)
5256 V = Builder.CreateShuffleVector(V, Mask);
5257
5258 if (NumElts != NumScalars) {
5259 SmallVector<int> InsertMask(NumElts);
5260 std::iota(InsertMask.begin(), InsertMask.end(), 0);
5261 for (unsigned I = 0; I < NumElts; I++) {
5262 if (Mask[I] != UndefMaskElem)
5263 InsertMask[Offset + I] = NumElts + I;
5264 }
5265
5266 V = Builder.CreateShuffleVector(
5267 FirstInsert->getOperand(0), V, InsertMask,
5268 cast<Instruction>(E->Scalars.back())->getName());
5269 }
5270
5271 ++NumVectorInstructions;
5272 E->VectorizedValue = V;
5273 return V;
5274 }
5275 case Instruction::ZExt:
5276 case Instruction::SExt:
5277 case Instruction::FPToUI:
5278 case Instruction::FPToSI:
5279 case Instruction::FPExt:
5280 case Instruction::PtrToInt:
5281 case Instruction::IntToPtr:
5282 case Instruction::SIToFP:
5283 case Instruction::UIToFP:
5284 case Instruction::Trunc:
5285 case Instruction::FPTrunc:
5286 case Instruction::BitCast: {
5287 setInsertPointAfterBundle(E);
5288
5289 Value *InVec = vectorizeTree(E->getOperand(0));
5290
5291 if (E->VectorizedValue) {
5292 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5293 return E->VectorizedValue;
5294 }
5295
5296 auto *CI = cast<CastInst>(VL0);
5297 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
5298 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5299 V = ShuffleBuilder.finalize(V);
5300
5301 E->VectorizedValue = V;
5302 ++NumVectorInstructions;
5303 return V;
5304 }
5305 case Instruction::FCmp:
5306 case Instruction::ICmp: {
5307 setInsertPointAfterBundle(E);
5308
5309 Value *L = vectorizeTree(E->getOperand(0));
5310 Value *R = vectorizeTree(E->getOperand(1));
5311
5312 if (E->VectorizedValue) {
5313 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5314 return E->VectorizedValue;
5315 }
5316
5317 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
5318 Value *V = Builder.CreateCmp(P0, L, R);
5319 propagateIRFlags(V, E->Scalars, VL0);
5320 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5321 V = ShuffleBuilder.finalize(V);
5322
5323 E->VectorizedValue = V;
5324 ++NumVectorInstructions;
5325 return V;
5326 }
5327 case Instruction::Select: {
5328 setInsertPointAfterBundle(E);
5329
5330 Value *Cond = vectorizeTree(E->getOperand(0));
5331 Value *True = vectorizeTree(E->getOperand(1));
5332 Value *False = vectorizeTree(E->getOperand(2));
5333
5334 if (E->VectorizedValue) {
5335 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5336 return E->VectorizedValue;
5337 }
5338
5339 Value *V = Builder.CreateSelect(Cond, True, False);
5340 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5341 V = ShuffleBuilder.finalize(V);
5342
5343 E->VectorizedValue = V;
5344 ++NumVectorInstructions;
5345 return V;
5346 }
5347 case Instruction::FNeg: {
5348 setInsertPointAfterBundle(E);
5349
5350 Value *Op = vectorizeTree(E->getOperand(0));
5351
5352 if (E->VectorizedValue) {
5353 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5354 return E->VectorizedValue;
5355 }
5356
5357 Value *V = Builder.CreateUnOp(
5358 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
5359 propagateIRFlags(V, E->Scalars, VL0);
5360 if (auto *I = dyn_cast<Instruction>(V))
5361 V = propagateMetadata(I, E->Scalars);
5362
5363 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5364 V = ShuffleBuilder.finalize(V);
5365
5366 E->VectorizedValue = V;
5367 ++NumVectorInstructions;
5368
5369 return V;
5370 }
5371 case Instruction::Add:
5372 case Instruction::FAdd:
5373 case Instruction::Sub:
5374 case Instruction::FSub:
5375 case Instruction::Mul:
5376 case Instruction::FMul:
5377 case Instruction::UDiv:
5378 case Instruction::SDiv:
5379 case Instruction::FDiv:
5380 case Instruction::URem:
5381 case Instruction::SRem:
5382 case Instruction::FRem:
5383 case Instruction::Shl:
5384 case Instruction::LShr:
5385 case Instruction::AShr:
5386 case Instruction::And:
5387 case Instruction::Or:
5388 case Instruction::Xor: {
5389 setInsertPointAfterBundle(E);
5390
5391 Value *LHS = vectorizeTree(E->getOperand(0));
5392 Value *RHS = vectorizeTree(E->getOperand(1));
5393
5394 if (E->VectorizedValue) {
5395 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5396 return E->VectorizedValue;
5397 }
5398
5399 Value *V = Builder.CreateBinOp(
5400 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
5401 RHS);
5402 propagateIRFlags(V, E->Scalars, VL0);
5403 if (auto *I = dyn_cast<Instruction>(V))
5404 V = propagateMetadata(I, E->Scalars);
5405
5406 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5407 V = ShuffleBuilder.finalize(V);
5408
5409 E->VectorizedValue = V;
5410 ++NumVectorInstructions;
5411
5412 return V;
5413 }
5414 case Instruction::Load: {
5415 // Loads are inserted at the head of the tree because we don't want to
5416 // sink them all the way down past store instructions.
5417 bool IsReorder = E->updateStateIfReorder();
5418 if (IsReorder)
5419 VL0 = E->getMainOp();
5420 setInsertPointAfterBundle(E);
5421
5422 LoadInst *LI = cast<LoadInst>(VL0);
5423 Instruction *NewLI;
5424 unsigned AS = LI->getPointerAddressSpace();
5425 Value *PO = LI->getPointerOperand();
5426 if (E->State == TreeEntry::Vectorize) {
5427
5428 Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
5429
5430 // The pointer operand uses an in-tree scalar, so we add the new BitCast
5431 // to the ExternalUses list to make sure that an extract will be generated
5432 // in the future.
5433 if (TreeEntry *Entry = getTreeEntry(PO)) {
5434 // Find which lane we need to extract.
5435 unsigned FoundLane = Entry->findLaneForValue(PO);
5436 ExternalUses.emplace_back(PO, cast<User>(VecPtr), FoundLane);
5437 }
5438
5439 NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
5440 } else {
5441 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
5442 Value *VecPtr = vectorizeTree(E->getOperand(0));
5443 // Use the minimum alignment of the gathered loads.
5444 Align CommonAlignment = LI->getAlign();
5445 for (Value *V : E->Scalars)
5446 CommonAlignment =
5447 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
5448 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
5449 }
5450 Value *V = propagateMetadata(NewLI, E->Scalars);
5451
5452 ShuffleBuilder.addInversedMask(E->ReorderIndices);
5453 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5454 V = ShuffleBuilder.finalize(V);
5455 E->VectorizedValue = V;
5456 ++NumVectorInstructions;
5457 return V;
5458 }
5459 case Instruction::Store: {
5460 bool IsReorder = !E->ReorderIndices.empty();
5461 auto *SI = cast<StoreInst>(
5462 IsReorder ? E->Scalars[E->ReorderIndices.front()] : VL0);
5463 unsigned AS = SI->getPointerAddressSpace();
5464
5465 setInsertPointAfterBundle(E);
5466
5467 Value *VecValue = vectorizeTree(E->getOperand(0));
5468 ShuffleBuilder.addMask(E->ReorderIndices);
5469 VecValue = ShuffleBuilder.finalize(VecValue);
5470
5471 Value *ScalarPtr = SI->getPointerOperand();
5472 Value *VecPtr = Builder.CreateBitCast(
5473 ScalarPtr, VecValue->getType()->getPointerTo(AS));
5474 StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr,
5475 SI->getAlign());
5476
5477 // The pointer operand uses an in-tree scalar, so add the new BitCast to
5478 // ExternalUses to make sure that an extract will be generated in the
5479 // future.
5480 if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) {
5481 // Find which lane we need to extract.
5482 unsigned FoundLane = Entry->findLaneForValue(ScalarPtr);
5483 ExternalUses.push_back(
5484 ExternalUser(ScalarPtr, cast<User>(VecPtr), FoundLane));
5485 }
5486
5487 Value *V = propagateMetadata(ST, E->Scalars);
5488
5489 E->VectorizedValue = V;
5490 ++NumVectorInstructions;
5491 return V;
5492 }
5493 case Instruction::GetElementPtr: {
5494 setInsertPointAfterBundle(E);
5495
5496 Value *Op0 = vectorizeTree(E->getOperand(0));
5497
5498 std::vector<Value *> OpVecs;
5499 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
5500 ++j) {
5501 ValueList &VL = E->getOperand(j);
5502 // Need to cast all elements to the same type before vectorization to
5503 // avoid a crash.
5504 Type *VL0Ty = VL0->getOperand(j)->getType();
5505 Type *Ty = llvm::all_of(
5506 VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); })
5507 ? VL0Ty
5508 : DL->getIndexType(cast<GetElementPtrInst>(VL0)
5509 ->getPointerOperandType()
5510 ->getScalarType());
5511 for (Value *&V : VL) {
5512 auto *CI = cast<ConstantInt>(V);
5513 V = ConstantExpr::getIntegerCast(CI, Ty,
5514 CI->getValue().isSignBitSet());
5515 }
5516 Value *OpVec = vectorizeTree(VL);
5517 OpVecs.push_back(OpVec);
5518 }
5519
5520 Value *V = Builder.CreateGEP(
5521 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
5522 if (Instruction *I = dyn_cast<Instruction>(V))
5523 V = propagateMetadata(I, E->Scalars);
5524
5525 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5526 V = ShuffleBuilder.finalize(V);
5527
5528 E->VectorizedValue = V;
5529 ++NumVectorInstructions;
5530
5531 return V;
5532 }
5533 case Instruction::Call: {
5534 CallInst *CI = cast<CallInst>(VL0);
5535 setInsertPointAfterBundle(E);
5536
5537 Intrinsic::ID IID = Intrinsic::not_intrinsic;
5538 if (Function *FI = CI->getCalledFunction())
5539 IID = FI->getIntrinsicID();
5540
5541 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5542
5543 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
5544 bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
5545 VecCallCosts.first <= VecCallCosts.second;
5546
5547 Value *ScalarArg = nullptr;
5548 std::vector<Value *> OpVecs;
5549 SmallVector<Type *, 2> TysForDecl =
5550 {FixedVectorType::get(CI->getType(), E->Scalars.size())};
5551 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
5552 ValueList OpVL;
5553 // Some intrinsics have scalar arguments. Such arguments should not be
5554 // vectorized.
5555 if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) {
5556 CallInst *CEI = cast<CallInst>(VL0);
5557 ScalarArg = CEI->getArgOperand(j);
5558 OpVecs.push_back(CEI->getArgOperand(j));
5559 if (hasVectorInstrinsicOverloadedScalarOpd(IID, j))
5560 TysForDecl.push_back(ScalarArg->getType());
5561 continue;
5562 }
5563
5564 Value *OpVec = vectorizeTree(E->getOperand(j));
5565 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
5566 OpVecs.push_back(OpVec);
5567 }
5568
5569 Function *CF;
5570 if (!UseIntrinsic) {
5571 VFShape Shape =
5572 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
5573 VecTy->getNumElements())),
5574 false /*HasGlobalPred*/);
5575 CF = VFDatabase(*CI).getVectorizedFunction(Shape);
5576 } else {
5577 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl);
5578 }
5579
5580 SmallVector<OperandBundleDef, 1> OpBundles;
5581 CI->getOperandBundlesAsDefs(OpBundles);
5582 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
5583
5584 // The scalar argument uses an in-tree scalar, so we add the new vectorized
5585 // call to the ExternalUses list to make sure that an extract will be
5586 // generated in the future.
5587 if (ScalarArg) {
5588 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) {
5589 // Find which lane we need to extract.
5590 unsigned FoundLane = Entry->findLaneForValue(ScalarArg);
5591 ExternalUses.push_back(
5592 ExternalUser(ScalarArg, cast<User>(V), FoundLane));
5593 }
5594 }
5595
5596 propagateIRFlags(V, E->Scalars, VL0);
5597 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5598 V = ShuffleBuilder.finalize(V);
5599
5600 E->VectorizedValue = V;
5601 ++NumVectorInstructions;
5602 return V;
5603 }
5604 case Instruction::ShuffleVector: {
5605 assert(E->isAltShuffle() &&
5606 ((Instruction::isBinaryOp(E->getOpcode()) &&
5607 Instruction::isBinaryOp(E->getAltOpcode())) ||
5608 (Instruction::isCast(E->getOpcode()) &&
5609 Instruction::isCast(E->getAltOpcode()))) &&
5610 "Invalid Shuffle Vector Operand");
5611
5612 Value *LHS = nullptr, *RHS = nullptr;
5613 if (Instruction::isBinaryOp(E->getOpcode())) {
5614 setInsertPointAfterBundle(E);
5615 LHS = vectorizeTree(E->getOperand(0));
5616 RHS = vectorizeTree(E->getOperand(1));
5617 } else {
5618 setInsertPointAfterBundle(E);
5619 LHS = vectorizeTree(E->getOperand(0));
5620 }
5621
5622 if (E->VectorizedValue) {
5623 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5624 return E->VectorizedValue;
5625 }
5626
5627 Value *V0, *V1;
5628 if (Instruction::isBinaryOp(E->getOpcode())) {
5629 V0 = Builder.CreateBinOp(
5630 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
5631 V1 = Builder.CreateBinOp(
5632 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
5633 } else {
5634 V0 = Builder.CreateCast(
5635 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
5636 V1 = Builder.CreateCast(
5637 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
5638 }
5639
5640 // Create shuffle to take alternate operations from the vector.
5641 // Also, gather up main and alt scalar ops to propagate IR flags to
5642 // each vector operation.
5643 ValueList OpScalars, AltScalars;
5644 unsigned Sz = E->Scalars.size();
5645 SmallVector<int> Mask(Sz);
5646 for (unsigned I = 0; I < Sz; ++I) {
5647 auto *OpInst = cast<Instruction>(E->Scalars[I]);
5648 assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
5649 if (OpInst->getOpcode() == E->getAltOpcode()) {
5650 Mask[I] = Sz + I;
5651 AltScalars.push_back(E->Scalars[I]);
5652 } else {
5653 Mask[I] = I;
5654 OpScalars.push_back(E->Scalars[I]);
5655 }
5656 }
5657
5658 propagateIRFlags(V0, OpScalars);
5659 propagateIRFlags(V1, AltScalars);
5660
5661 Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
5662 if (Instruction *I = dyn_cast<Instruction>(V))
5663 V = propagateMetadata(I, E->Scalars);
5664 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5665 V = ShuffleBuilder.finalize(V);
5666
5667 E->VectorizedValue = V;
5668 ++NumVectorInstructions;
5669
5670 return V;
5671 }
5672 default:
5673 llvm_unreachable("unknown inst");
5674 }
5675 return nullptr;
5676 }
5677
5678 Value *BoUpSLP::vectorizeTree() {
5679 ExtraValueToDebugLocsMap ExternallyUsedValues;
5680 return vectorizeTree(ExternallyUsedValues);
5681 }
5682
5683 Value *
5684 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
5685 // All blocks must be scheduled before any instructions are inserted.
5686 for (auto &BSIter : BlocksSchedules) {
5687 scheduleBlock(BSIter.second.get());
5688 }
5689
5690 Builder.SetInsertPoint(&F->getEntryBlock().front());
5691 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
5692
5693 // If the vectorized tree can be rewritten in a smaller type, we truncate the
5694 // vectorized root. InstCombine will then rewrite the entire expression. We
5695 // sign extend the extracted values below.
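// For example (illustrative): if the roots are i32 operations that were
// proven to need only 8 bits, the <N x i32> root is truncated to <N x i8>
// here, and each external extract below is sign- or zero-extended back to
// i32 according to MinBWs.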
5696 auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
5697 if (MinBWs.count(ScalarRoot)) {
5698 if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
5699 // If the current instruction is a phi, insert the truncate after the
5700 // last phi node in the block.
5701 if (isa<PHINode>(I))
5702 Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
5703 else
5704 Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
5705 }
5706 auto BundleWidth = VectorizableTree[0]->Scalars.size();
5707 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
5708 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
5709 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
5710 VectorizableTree[0]->VectorizedValue = Trunc;
5711 }
5712
5713 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
5714 << " values .\n");
5715
5716 // Extract all of the elements with the external uses.
5717 for (const auto &ExternalUse : ExternalUses) {
5718 Value *Scalar = ExternalUse.Scalar;
5719 llvm::User *User = ExternalUse.User;
5720
5721 // Skip users that we already RAUW'd. This happens when one instruction
5722 // has multiple uses of the same value.
5723 if (User && !is_contained(Scalar->users(), User))
5724 continue;
5725 TreeEntry *E = getTreeEntry(Scalar);
5726 assert(E && "Invalid scalar");
5727 assert(E->State != TreeEntry::NeedToGather &&
5728 "Extracting from a gather list");
5729
5730 Value *Vec = E->VectorizedValue;
5731 assert(Vec && "Can't find vectorizable value");
5732
5733 Value *Lane = Builder.getInt32(ExternalUse.Lane);
5734 auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
5735 if (Scalar->getType() != Vec->getType()) {
5736 Value *Ex;
5737 // "Reuse" the existing extract to improve final codegen.
5738 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) {
5739 Ex = Builder.CreateExtractElement(ES->getOperand(0),
5740 ES->getOperand(1));
5741 } else {
5742 Ex = Builder.CreateExtractElement(Vec, Lane);
5743 }
5744 // If necessary, sign-extend or zero-extend ScalarRoot
5745 // to the larger type.
5746 if (!MinBWs.count(ScalarRoot))
5747 return Ex;
5748 if (MinBWs[ScalarRoot].second)
5749 return Builder.CreateSExt(Ex, Scalar->getType());
5750 return Builder.CreateZExt(Ex, Scalar->getType());
5751 }
5752 assert(isa<FixedVectorType>(Scalar->getType()) &&
5753 isa<InsertElementInst>(Scalar) &&
5754 "In-tree scalar of vector type is not insertelement?");
5755 return Vec;
5756 };
5757 // If User == nullptr, the Scalar is used as an extra arg. Generate an
5758 // ExtractElement instruction and update the record for this scalar in
5759 // ExternallyUsedValues.
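// A typical source of such extra args is the horizontal reduction code,
// which registers values that must stay live outside the vectorized tree.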
5760 if (!User) {
5761 assert(ExternallyUsedValues.count(Scalar) &&
5762 "Scalar with nullptr as an external user must be registered in "
5763 "ExternallyUsedValues map");
5764 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
5765 Builder.SetInsertPoint(VecI->getParent(),
5766 std::next(VecI->getIterator()));
5767 } else {
5768 Builder.SetInsertPoint(&F->getEntryBlock().front());
5769 }
5770 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
5771 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
5772 auto &NewInstLocs = ExternallyUsedValues[NewInst];
5773 auto It = ExternallyUsedValues.find(Scalar);
5774 assert(It != ExternallyUsedValues.end() &&
5775 "Externally used scalar is not found in ExternallyUsedValues");
5776 NewInstLocs.append(It->second);
5777 ExternallyUsedValues.erase(Scalar);
5778 // Required to update internally referenced instructions.
5779 Scalar->replaceAllUsesWith(NewInst);
5780 continue;
5781 }
5782
5783 // Generate extracts for out-of-tree users.
5784 // Find the insertion point for the extractelement lane.
5785 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
5786 if (PHINode *PH = dyn_cast<PHINode>(User)) {
5787 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
5788 if (PH->getIncomingValue(i) == Scalar) {
5789 Instruction *IncomingTerminator =
5790 PH->getIncomingBlock(i)->getTerminator();
5791 if (isa<CatchSwitchInst>(IncomingTerminator)) {
5792 Builder.SetInsertPoint(VecI->getParent(),
5793 std::next(VecI->getIterator()));
5794 } else {
5795 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
5796 }
5797 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
5798 CSEBlocks.insert(PH->getIncomingBlock(i));
5799 PH->setOperand(i, NewInst);
5800 }
5801 }
5802 } else {
5803 Builder.SetInsertPoint(cast<Instruction>(User));
5804 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
5805 CSEBlocks.insert(cast<Instruction>(User)->getParent());
5806 User->replaceUsesOfWith(Scalar, NewInst);
5807 }
5808 } else {
5809 Builder.SetInsertPoint(&F->getEntryBlock().front());
5810 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
5811 CSEBlocks.insert(&F->getEntryBlock());
5812 User->replaceUsesOfWith(Scalar, NewInst);
5813 }
5814
5815 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
5816 }
5817
5818 // For each vectorized value:
5819 for (auto &TEPtr : VectorizableTree) {
5820 TreeEntry *Entry = TEPtr.get();
5821
5822 // No need to handle users of gathered values.
5823 if (Entry->State == TreeEntry::NeedToGather)
5824 continue;
5825
5826 assert(Entry->VectorizedValue && "Can't find vectorizable value");
5827
5828 // For each lane:
5829 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
5830 Value *Scalar = Entry->Scalars[Lane];
5831
5832 #ifndef NDEBUG
5833 Type *Ty = Scalar->getType();
5834 if (!Ty->isVoidTy()) {
5835 for (User *U : Scalar->users()) {
5836 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
5837
5838 // It is legal to delete users in the ignorelist.
5839 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
5840 "Deleting out-of-tree value");
5841 }
5842 }
5843 #endif
5844 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
5845 eraseInstruction(cast<Instruction>(Scalar));
5846 }
5847 }
5848
5849 Builder.ClearInsertionPoint();
5850 InstrElementSize.clear();
5851
5852 return VectorizableTree[0]->VectorizedValue;
5853 }
5854
5855 void BoUpSLP::optimizeGatherSequence() {
5856 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
5857 << " gather sequence instructions.\n");
5858 // LICM InsertElementInst sequences.
5859 for (Instruction *I : GatherSeq) {
5860 if (isDeleted(I))
5861 continue;
5862
5863 // Check if this block is inside a loop.
5864 Loop *L = LI->getLoopFor(I->getParent());
5865 if (!L)
5866 continue;
5867
5868 // Check if it has a preheader.
5869 BasicBlock *PreHeader = L->getLoopPreheader();
5870 if (!PreHeader)
5871 continue;
5872
5873 // If the vector or the element that we insert into it are
5874 // instructions that are defined inside this loop, then we can't
5875 // hoist the insertion out of the loop.
5876 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
5877 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
5878 if (Op0 && L->contains(Op0))
5879 continue;
5880 if (Op1 && L->contains(Op1))
5881 continue;
5882
5883 // We can hoist this instruction. Move it to the pre-header.
5884 I->moveBefore(PreHeader->getTerminator());
5885 }
5886
5887 // Make a list of all reachable blocks in our CSE queue.
5888 SmallVector<const DomTreeNode *, 8> CSEWorkList;
5889 CSEWorkList.reserve(CSEBlocks.size());
5890 for (BasicBlock *BB : CSEBlocks)
5891 if (DomTreeNode *N = DT->getNode(BB)) {
5892 assert(DT->isReachableFromEntry(N));
5893 CSEWorkList.push_back(N);
5894 }
5895
5896 // Sort blocks by domination. This ensures we visit a block after all blocks
5897 // dominating it are visited.
5898 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
5899 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
5900 "Different nodes should have different DFS numbers");
5901 return A->getDFSNumIn() < B->getDFSNumIn();
5902 });
5903
5904 // Perform O(N^2) search over the gather sequences and merge identical
5905 // instructions. TODO: We can further optimize this scan if we split the
5906 // instructions into different buckets based on the insert lane.
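// For example, two identical "extractelement <4 x i32> %v, i32 0"
// instructions, where the first dominates the second, are merged into one.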
5907 SmallVector<Instruction *, 16> Visited;
5908 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
5909 assert(*I &&
5910 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
5911 "Worklist not sorted properly!");
5912 BasicBlock *BB = (*I)->getBlock();
5913 // For all instructions in blocks containing gather sequences:
5914 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
5915 Instruction *In = &*it++;
5916 if (isDeleted(In))
5917 continue;
5918 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
5919 continue;
5920
5921 // Check if we can replace this instruction with any of the
5922 // visited instructions.
5923 for (Instruction *v : Visited) {
5924 if (In->isIdenticalTo(v) &&
5925 DT->dominates(v->getParent(), In->getParent())) {
5926 In->replaceAllUsesWith(v);
5927 eraseInstruction(In);
5928 In = nullptr;
5929 break;
5930 }
5931 }
5932 if (In) {
5933 assert(!is_contained(Visited, In));
5934 Visited.push_back(In);
5935 }
5936 }
5937 }
5938 CSEBlocks.clear();
5939 GatherSeq.clear();
5940 }
5941
5942 // Groups the instructions into a bundle (which is then a single scheduling
5943 // entity) and schedules instructions until the bundle gets ready.
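// Returns None if bundling fails, e.g. because the scheduling region size
// limit is exceeded or a dependence cycle keeps the bundle from getting ready.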
5944 Optional<BoUpSLP::ScheduleData *>
5945 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
5946 const InstructionsState &S) {
5947 if (isa<PHINode>(S.OpValue) || isa<InsertElementInst>(S.OpValue))
5948 return nullptr;
5949
5950 // Initialize the instruction bundle.
5951 Instruction *OldScheduleEnd = ScheduleEnd;
5952 ScheduleData *PrevInBundle = nullptr;
5953 ScheduleData *Bundle = nullptr;
5954 bool ReSchedule = false;
5955 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");
5956
5957 auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule,
5958 ScheduleData *Bundle) {
5959 // The scheduling region got new instructions at the lower end (or it is a
5960 // new region for the first bundle). This makes it necessary to
5961 // recalculate all dependencies.
5962 // It is seldom that this needs to be done a second time after adding the
5963 // initial bundle to the region.
5964 if (ScheduleEnd != OldScheduleEnd) {
5965 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
5966 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
5967 ReSchedule = true;
5968 }
5969 if (ReSchedule) {
5970 resetSchedule();
5971 initialFillReadyList(ReadyInsts);
5972 }
5973 if (Bundle) {
5974 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
5975 << " in block " << BB->getName() << "\n");
5976 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
5977 }
5978
5979 // Now try to schedule the new bundle or (if no bundle) just calculate
5980 // dependencies. As soon as the bundle is "ready" it means that there are no
5981 // cyclic dependencies and we can schedule it. Note that it's important that
5982 // we don't "schedule" the bundle yet (see cancelScheduling).
5983 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
5984 !ReadyInsts.empty()) {
5985 ScheduleData *Picked = ReadyInsts.pop_back_val();
5986 if (Picked->isSchedulingEntity() && Picked->isReady())
5987 schedule(Picked, ReadyInsts);
5988 }
5989 };
5990
5991 // Make sure that the scheduling region contains all
5992 // instructions of the bundle.
5993 for (Value *V : VL) {
5994 if (!extendSchedulingRegion(V, S)) {
5995 // If the scheduling region got new instructions at the lower end (or it
5996 // is a new region for the first bundle), all dependencies must be
5997 // recalculated before we bail out.
5998 // Otherwise the compiler may crash trying to incorrectly calculate
5999 // dependencies and emit instructions in the wrong order at the actual
6000 // scheduling.
6001 TryScheduleBundle(/*ReSchedule=*/false, nullptr);
6002 return None;
6003 }
6004 }
6005
6006 for (Value *V : VL) {
6007 ScheduleData *BundleMember = getScheduleData(V);
6008 assert(BundleMember &&
6009 "no ScheduleData for bundle member (maybe not in same basic block)");
6010 if (BundleMember->IsScheduled) {
6011 // A bundle member was scheduled as a single instruction before and now
6012 // needs to be scheduled as part of the bundle. We just get rid of the
6013 // existing schedule.
6014 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
6015 << " was already scheduled\n");
6016 ReSchedule = true;
6017 }
6018 assert(BundleMember->isSchedulingEntity() &&
6019 "bundle member already part of other bundle");
6020 if (PrevInBundle) {
6021 PrevInBundle->NextInBundle = BundleMember;
6022 } else {
6023 Bundle = BundleMember;
6024 }
6025 BundleMember->UnscheduledDepsInBundle = 0;
6026 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
6027
6028 // Group the instructions to a bundle.
6029 BundleMember->FirstInBundle = Bundle;
6030 PrevInBundle = BundleMember;
6031 }
6032 assert(Bundle && "Failed to find schedule bundle");
6033 TryScheduleBundle(ReSchedule, Bundle);
6034 if (!Bundle->isReady()) {
6035 cancelScheduling(VL, S.OpValue);
6036 return None;
6037 }
6038 return Bundle;
6039 }
6040
6041 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
6042 Value *OpValue) {
6043 if (isa<PHINode>(OpValue) || isa<InsertElementInst>(OpValue))
6044 return;
6045
6046 ScheduleData *Bundle = getScheduleData(OpValue);
6047 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
6048 assert(!Bundle->IsScheduled &&
6049 "Can't cancel bundle which is already scheduled");
6050 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
6051 "tried to unbundle something which is not a bundle");
6052
6053 // Un-bundle: make single instructions out of the bundle.
6054 ScheduleData *BundleMember = Bundle;
6055 while (BundleMember) {
6056 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
6057 BundleMember->FirstInBundle = BundleMember;
6058 ScheduleData *Next = BundleMember->NextInBundle;
6059 BundleMember->NextInBundle = nullptr;
6060 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
6061 if (BundleMember->UnscheduledDepsInBundle == 0) {
6062 ReadyInsts.insert(BundleMember);
6063 }
6064 BundleMember = Next;
6065 }
6066 }
6067
6068 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
6069 // Allocate a new ScheduleData for the instruction.
6070 if (ChunkPos >= ChunkSize) {
6071 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
6072 ChunkPos = 0;
6073 }
6074 return &(ScheduleDataChunks.back()[ChunkPos++]);
6075 }
6076
6077 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
6078 const InstructionsState &S) {
6079 if (getScheduleData(V, isOneOf(S, V)))
6080 return true;
6081 Instruction *I = dyn_cast<Instruction>(V);
6082 assert(I && "bundle member must be an instruction");
6083 assert(!isa<PHINode>(I) && !isa<InsertElementInst>(I) &&
6084 "phi nodes/insertelements don't need to be scheduled");
6085 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool {
6086 ScheduleData *ISD = getScheduleData(I);
6087 if (!ISD)
6088 return false;
6089 assert(isInSchedulingRegion(ISD) &&
6090 "ScheduleData not in scheduling region");
6091 ScheduleData *SD = allocateScheduleDataChunks();
6092 SD->Inst = I;
6093 SD->init(SchedulingRegionID, S.OpValue);
6094 ExtraScheduleDataMap[I][S.OpValue] = SD;
6095 return true;
6096 };
6097 if (CheckScheduleForI(I))
6098 return true;
6099 if (!ScheduleStart) {
6100 // It's the first instruction in the new region.
6101 initScheduleData(I, I->getNextNode(), nullptr, nullptr);
6102 ScheduleStart = I;
6103 ScheduleEnd = I->getNextNode();
6104 if (isOneOf(S, I) != I)
6105 CheckScheduleForI(I);
6106 assert(ScheduleEnd && "tried to vectorize a terminator?");
6107 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
6108 return true;
6109 }
6110 // Search up and down at the same time, because we don't know if the new
6111 // instruction is above or below the existing scheduling region.
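// For example, if the region currently spans [i4, i7) and the new instruction
// is i2, the upward scan reaches i2 first and ScheduleStart is moved to i2.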
6112 BasicBlock::reverse_iterator UpIter =
6113 ++ScheduleStart->getIterator().getReverse();
6114 BasicBlock::reverse_iterator UpperEnd = BB->rend();
6115 BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
6116 BasicBlock::iterator LowerEnd = BB->end();
6117 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
6118 &*DownIter != I) {
6119 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
6120 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
6121 return false;
6122 }
6123
6124 ++UpIter;
6125 ++DownIter;
6126 }
6127 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
6128 assert(I->getParent() == ScheduleStart->getParent() &&
6129 "Instruction is in wrong basic block.");
6130 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
6131 ScheduleStart = I;
6132 if (isOneOf(S, I) != I)
6133 CheckScheduleForI(I);
6134 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
6135 << "\n");
6136 return true;
6137 }
6138 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
6139 "Expected to reach the top of the basic block or the instruction at "
6140 "the lower end.");
6141 assert(I->getParent() == ScheduleEnd->getParent() &&
6142 "Instruction is in wrong basic block.");
6143 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
6144 nullptr);
6145 ScheduleEnd = I->getNextNode();
6146 if (isOneOf(S, I) != I)
6147 CheckScheduleForI(I);
6148 assert(ScheduleEnd && "tried to vectorize a terminator?");
6149 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
6150 return true;
6151 }
6152
6153 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
6154 Instruction *ToI,
6155 ScheduleData *PrevLoadStore,
6156 ScheduleData *NextLoadStore) {
6157 ScheduleData *CurrentLoadStore = PrevLoadStore;
6158 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
6159 ScheduleData *SD = ScheduleDataMap[I];
6160 if (!SD) {
6161 SD = allocateScheduleDataChunks();
6162 ScheduleDataMap[I] = SD;
6163 SD->Inst = I;
6164 }
6165 assert(!isInSchedulingRegion(SD) &&
6166 "new ScheduleData already in scheduling region");
6167 SD->init(SchedulingRegionID, I);
6168
6169 if (I->mayReadOrWriteMemory() &&
6170 (!isa<IntrinsicInst>(I) ||
6171 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
6172 cast<IntrinsicInst>(I)->getIntrinsicID() !=
6173 Intrinsic::pseudoprobe))) {
6174 // Update the linked list of memory accessing instructions.
6175 if (CurrentLoadStore) {
6176 CurrentLoadStore->NextLoadStore = SD;
6177 } else {
6178 FirstLoadStoreInRegion = SD;
6179 }
6180 CurrentLoadStore = SD;
6181 }
6182 }
6183 if (NextLoadStore) {
6184 if (CurrentLoadStore)
6185 CurrentLoadStore->NextLoadStore = NextLoadStore;
6186 } else {
6187 LastLoadStoreInRegion = CurrentLoadStore;
6188 }
6189 }
6190
6191 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
6192 bool InsertInReadyList,
6193 BoUpSLP *SLP) {
6194 assert(SD->isSchedulingEntity());
6195
6196 SmallVector<ScheduleData *, 10> WorkList;
6197 WorkList.push_back(SD);
6198
6199 while (!WorkList.empty()) {
6200 ScheduleData *SD = WorkList.pop_back_val();
6201
6202 ScheduleData *BundleMember = SD;
6203 while (BundleMember) {
6204 assert(isInSchedulingRegion(BundleMember));
6205 if (!BundleMember->hasValidDependencies()) {
6206
6207 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
6208 << "\n");
6209 BundleMember->Dependencies = 0;
6210 BundleMember->resetUnscheduledDeps();
6211
6212 // Handle def-use chain dependencies.
6213 if (BundleMember->OpValue != BundleMember->Inst) {
6214 ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
6215 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
6216 BundleMember->Dependencies++;
6217 ScheduleData *DestBundle = UseSD->FirstInBundle;
6218 if (!DestBundle->IsScheduled)
6219 BundleMember->incrementUnscheduledDeps(1);
6220 if (!DestBundle->hasValidDependencies())
6221 WorkList.push_back(DestBundle);
6222 }
6223 } else {
6224 for (User *U : BundleMember->Inst->users()) {
6225 if (isa<Instruction>(U)) {
6226 ScheduleData *UseSD = getScheduleData(U);
6227 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
6228 BundleMember->Dependencies++;
6229 ScheduleData *DestBundle = UseSD->FirstInBundle;
6230 if (!DestBundle->IsScheduled)
6231 BundleMember->incrementUnscheduledDeps(1);
6232 if (!DestBundle->hasValidDependencies())
6233 WorkList.push_back(DestBundle);
6234 }
6235 } else {
6236 // It is unclear whether this can ever happen, but we need to be safe.
6237 // Counting a dependency that is never resolved keeps the
6238 // instruction/bundle from being scheduled, eventually disabling vectorization.
6239 BundleMember->Dependencies++;
6240 BundleMember->incrementUnscheduledDeps(1);
6241 }
6242 }
6243 }
6244
6245 // Handle the memory dependencies.
6246 ScheduleData *DepDest = BundleMember->NextLoadStore;
6247 if (DepDest) {
6248 Instruction *SrcInst = BundleMember->Inst;
6249 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
6250 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
6251 unsigned numAliased = 0;
6252 unsigned DistToSrc = 1;
6253
6254 while (DepDest) {
6255 assert(isInSchedulingRegion(DepDest));
6256
6257 // We have two limits to reduce the complexity:
6258 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
6259 // SLP->isAliased (which is the expensive part in this loop).
6260 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
6261 // the whole loop (even if the loop is fast, it's quadratic).
6262 // It's important for the loop break condition (see below) to
6263 // check this limit even between two read-only instructions.
6264 if (DistToSrc >= MaxMemDepDistance ||
6265 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
6266 (numAliased >= AliasedCheckLimit ||
6267 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
6268
6269 // We increment the counter only if the locations are aliased
6270 // (instead of counting all alias checks). This gives a better
6271 // balance between reduced runtime and accurate dependencies.
6272 numAliased++;
6273
6274 DepDest->MemoryDependencies.push_back(BundleMember);
6275 BundleMember->Dependencies++;
6276 ScheduleData *DestBundle = DepDest->FirstInBundle;
6277 if (!DestBundle->IsScheduled) {
6278 BundleMember->incrementUnscheduledDeps(1);
6279 }
6280 if (!DestBundle->hasValidDependencies()) {
6281 WorkList.push_back(DestBundle);
6282 }
6283 }
6284 DepDest = DepDest->NextLoadStore;
6285
6286 // Example, explaining the loop break condition: Let's assume our
6287 // starting instruction is i0 and MaxMemDepDistance = 3.
6288 //
6289 // +--------v--v--v
6290 // i0,i1,i2,i3,i4,i5,i6,i7,i8
6291 // +--------^--^--^
6292 //
6293 // MaxMemDepDistance let us stop alias-checking at i3 and we add
6294 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
6295 // Previously we already added dependencies from i3 to i6,i7,i8
6296 // (because of MaxMemDepDistance). As we added a dependency from
6297 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
6298 // and we can abort this loop at i6.
6299 if (DistToSrc >= 2 * MaxMemDepDistance)
6300 break;
6301 DistToSrc++;
6302 }
6303 }
6304 }
6305 BundleMember = BundleMember->NextInBundle;
6306 }
6307 if (InsertInReadyList && SD->isReady()) {
6308 ReadyInsts.push_back(SD);
6309 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
6310 << "\n");
6311 }
6312 }
6313 }
6314
6315 void BoUpSLP::BlockScheduling::resetSchedule() {
6316 assert(ScheduleStart &&
6317 "tried to reset schedule on block which has not been scheduled");
6318 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
6319 doForAllOpcodes(I, [&](ScheduleData *SD) {
6320 assert(isInSchedulingRegion(SD) &&
6321 "ScheduleData not in scheduling region");
6322 SD->IsScheduled = false;
6323 SD->resetUnscheduledDeps();
6324 });
6325 }
6326 ReadyInsts.clear();
6327 }
6328
6329 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
6330 if (!BS->ScheduleStart)
6331 return;
6332
6333 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
6334
6335 BS->resetSchedule();
6336
6337 // For the real scheduling we use a more sophisticated ready-list: it is
6338 // sorted by the original instruction location. This lets the final schedule
6339 // be as close as possible to the original instruction order.
6340 struct ScheduleDataCompare {
6341 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
6342 return SD2->SchedulingPriority < SD1->SchedulingPriority;
6343 }
6344 };
6345 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
6346
6347 // Ensure that all dependency data is updated and fill the ready-list with
6348 // initial instructions.
6349 int Idx = 0;
6350 int NumToSchedule = 0;
6351 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
6352 I = I->getNextNode()) {
6353 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
6354 assert((isa<InsertElementInst>(SD->Inst) ||
6355 SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) &&
6356 "scheduler and vectorizer bundle mismatch");
6357 SD->FirstInBundle->SchedulingPriority = Idx++;
6358 if (SD->isSchedulingEntity()) {
6359 BS->calculateDependencies(SD, false, this);
6360 NumToSchedule++;
6361 }
6362 });
6363 }
6364 BS->initialFillReadyList(ReadyInsts);
6365
6366 Instruction *LastScheduledInst = BS->ScheduleEnd;
6367
6368 // Do the "real" scheduling.
6369 while (!ReadyInsts.empty()) {
6370 ScheduleData *picked = *ReadyInsts.begin();
6371 ReadyInsts.erase(ReadyInsts.begin());
6372
6373 // Move the scheduled instruction(s) to their dedicated places, if not
6374 // there yet.
6375 ScheduleData *BundleMember = picked;
6376 while (BundleMember) {
6377 Instruction *pickedInst = BundleMember->Inst;
6378 if (pickedInst->getNextNode() != LastScheduledInst) {
6379 BS->BB->getInstList().remove(pickedInst);
6380 BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
6381 pickedInst);
6382 }
6383 LastScheduledInst = pickedInst;
6384 BundleMember = BundleMember->NextInBundle;
6385 }
6386
6387 BS->schedule(picked, ReadyInsts);
6388 NumToSchedule--;
6389 }
6390 assert(NumToSchedule == 0 && "could not schedule all instructions");
6391
6392 // Avoid duplicate scheduling of the block.
6393 BS->ScheduleStart = nullptr;
6394 }
6395
6396 unsigned BoUpSLP::getVectorElementSize(Value *V) {
6397 // If V is a store, just return the width of the stored value (or value
6398 // truncated just before storing) without traversing the expression tree.
6399 // This is the common case.
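// For example, for "store i32 %x" we return 32; if the stored value is
// "trunc i64 %y to i16", we return 64, the width of the pre-truncation value.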
6400 if (auto *Store = dyn_cast<StoreInst>(V)) {
6401 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
6402 return DL->getTypeSizeInBits(Trunc->getSrcTy());
6403 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
6404 }
6405
6406 if (auto *IEI = dyn_cast<InsertElementInst>(V))
6407 return getVectorElementSize(IEI->getOperand(1));
6408
6409 auto E = InstrElementSize.find(V);
6410 if (E != InstrElementSize.end())
6411 return E->second;
6412
6413 // If V is not a store, we can traverse the expression tree to find loads
6414 // that feed it. The type of the loaded value may indicate a more suitable
6415 // width than V's type. We want to base the vector element size on the width
6416 // of memory operations where possible.
6417 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
6418 SmallPtrSet<Instruction *, 16> Visited;
6419 if (auto *I = dyn_cast<Instruction>(V)) {
6420 Worklist.emplace_back(I, I->getParent());
6421 Visited.insert(I);
6422 }
6423
6424 // Traverse the expression tree in bottom-up order looking for loads. If we
6425 // encounter an instruction we don't yet handle, we give up.
6426 auto Width = 0u;
6427 while (!Worklist.empty()) {
6428 Instruction *I;
6429 BasicBlock *Parent;
6430 std::tie(I, Parent) = Worklist.pop_back_val();
6431
6432 // We should only be looking at scalar instructions here. If the current
6433 // instruction has a vector type, skip.
6434 auto *Ty = I->getType();
6435 if (isa<VectorType>(Ty))
6436 continue;
6437
6438 // If the current instruction is a load, update Width to reflect the
6439 // width of the loaded value.
6440 if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) ||
6441 isa<ExtractValueInst>(I))
6442 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
6443
6444 // Otherwise, we need to visit the operands of the instruction. We only
6445 // handle the interesting cases from buildTree here. If an operand is an
6446 // instruction we haven't yet visited and it is either in the same basic
6447 // block as the user or the user is a PHI node, we add it to the worklist.
6448 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
6449 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) ||
6450 isa<UnaryOperator>(I)) {
6451 for (Use &U : I->operands())
6452 if (auto *J = dyn_cast<Instruction>(U.get()))
6453 if (Visited.insert(J).second &&
6454 (isa<PHINode>(I) || J->getParent() == Parent))
6455 Worklist.emplace_back(J, J->getParent());
6456 } else {
6457 break;
6458 }
6459 }
6460
6461 // If we didn't encounter a memory access in the expression tree, or if we
6462 // gave up for some reason, just return the width of V. Otherwise, return the
6463 // maximum width we found.
6464 if (!Width) {
6465 if (auto *CI = dyn_cast<CmpInst>(V))
6466 V = CI->getOperand(0);
6467 Width = DL->getTypeSizeInBits(V->getType());
6468 }
6469
6470 for (Instruction *I : Visited)
6471 InstrElementSize[I] = Width;
6472
6473 return Width;
6474 }
6475
6476 // Determine if a value V in a vectorizable expression Expr can be demoted to a
6477 // smaller type with a truncation. We collect the values that will be demoted
6478 // in ToDemote and additional roots that require investigating in Roots.
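// For example, in "trunc (add (zext i8 %a to i32), (zext i8 %b to i32)) to i8"
// the add and both zexts can be demoted and the computation redone in i8.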
6479 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
6480 SmallVectorImpl<Value *> &ToDemote,
6481 SmallVectorImpl<Value *> &Roots) {
6482 // We can always demote constants.
6483 if (isa<Constant>(V)) {
6484 ToDemote.push_back(V);
6485 return true;
6486 }
6487
6488 // If the value is not an instruction in the expression with only one use, it
6489 // cannot be demoted.
6490 auto *I = dyn_cast<Instruction>(V);
6491 if (!I || !I->hasOneUse() || !Expr.count(I))
6492 return false;
6493
6494 switch (I->getOpcode()) {
6495
6496 // We can always demote truncations and extensions. Since truncations can
6497 // seed additional demotion, we save the truncated value.
6498 case Instruction::Trunc:
6499 Roots.push_back(I->getOperand(0));
6500 break;
6501 case Instruction::ZExt:
6502 case Instruction::SExt:
6503 if (isa<ExtractElementInst>(I->getOperand(0)) ||
6504 isa<InsertElementInst>(I->getOperand(0)))
6505 return false;
6506 break;
6507
6508 // We can demote certain binary operations if we can demote both of their
6509 // operands.
6510 case Instruction::Add:
6511 case Instruction::Sub:
6512 case Instruction::Mul:
6513 case Instruction::And:
6514 case Instruction::Or:
6515 case Instruction::Xor:
6516 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
6517 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
6518 return false;
6519 break;
6520
6521 // We can demote selects if we can demote their true and false values.
6522 case Instruction::Select: {
6523 SelectInst *SI = cast<SelectInst>(I);
6524 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
6525 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
6526 return false;
6527 break;
6528 }
6529
6530 // We can demote phis if we can demote all their incoming operands. Note that
6531 // we don't need to worry about cycles since we ensure single use above.
6532 case Instruction::PHI: {
6533 PHINode *PN = cast<PHINode>(I);
6534 for (Value *IncValue : PN->incoming_values())
6535 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
6536 return false;
6537 break;
6538 }
6539
6540 // Otherwise, conservatively give up.
6541 default:
6542 return false;
6543 }
6544
6545 // Record the value that we can demote.
6546 ToDemote.push_back(V);
6547 return true;
6548 }
6549
6550 void BoUpSLP::computeMinimumValueSizes() {
6551 // If there are no external uses, the expression tree must be rooted by a
6552 // store. We can't demote in-memory values, so there is nothing to do here.
6553 if (ExternalUses.empty())
6554 return;
6555
6556 // We only attempt to truncate integer expressions.
6557 auto &TreeRoot = VectorizableTree[0]->Scalars;
6558 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
6559 if (!TreeRootIT)
6560 return;
6561
6562 // If the expression is not rooted by a store, these roots should have
6563 // external uses. We will rely on InstCombine to rewrite the expression in
6564 // the narrower type. However, InstCombine only rewrites single-use values.
6565 // This means that if a tree entry other than a root is used externally, it
6566 // must have multiple uses and InstCombine will not rewrite it. The code
6567 // below ensures that only the roots are used externally.
6568 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
6569 for (auto &EU : ExternalUses)
6570 if (!Expr.erase(EU.Scalar))
6571 return;
6572 if (!Expr.empty())
6573 return;
6574
6575 // Collect the scalar values of the vectorizable expression. We will use this
6576 // context to determine which values can be demoted. If we see a truncation,
6577 // we mark it as seeding another demotion.
6578 for (auto &EntryPtr : VectorizableTree)
6579 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
6580
6581 // Ensure the roots of the vectorizable tree don't form a cycle. They must
6582 // have a single external user that is not in the vectorizable tree.
6583 for (auto *Root : TreeRoot)
6584 if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
6585 return;
6586
6587 // Conservatively determine if we can actually truncate the roots of the
6588 // expression. Collect the values that can be demoted in ToDemote and
6589 // additional roots that require investigating in Roots.
6590 SmallVector<Value *, 32> ToDemote;
6591 SmallVector<Value *, 4> Roots;
6592 for (auto *Root : TreeRoot)
6593 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
6594 return;
6595
6596 // The maximum bit width required to represent all the values that can be
6597 // demoted without loss of precision. It would be safe to truncate the roots
6598 // of the expression to this width.
6599 auto MaxBitWidth = 8u;
6600
6601 // We first check if all the bits of the roots are demanded. If they're not,
6602 // we can truncate the roots to this narrower type.
6603 for (auto *Root : TreeRoot) {
6604 auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
6605 MaxBitWidth = std::max<unsigned>(
6606 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
6607 }
6608
6609 // True if the roots can be zero-extended back to their original type, rather
6610 // than sign-extended. We know that if the leading bits are not demanded, we
6611 // can safely zero-extend. So we initialize IsKnownPositive to True.
6612 bool IsKnownPositive = true;
6613
6614 // If all the bits of the roots are demanded, we can try a little harder to
6615 // compute a narrower type. This can happen, for example, if the roots are
6616 // getelementptr indices. InstCombine promotes these indices to the pointer
6617 // width. Thus, all their bits are technically demanded even though the
6618 // address computation might be vectorized in a smaller type.
6619 //
6620 // We start by looking at each entry that can be demoted. We compute the
6621 // maximum bit width required to store the scalar by using ValueTracking to
6622 // compute the number of high-order bits we can truncate.
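// For example, an i32 scalar with 25 known sign bits needs only 32 - 25 = 7
// bits, plus one more below if the sign bit is not known to be zero.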
6623 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
6624 llvm::all_of(TreeRoot, [](Value *R) {
6625 assert(R->hasOneUse() && "Root should have only one use!");
6626 return isa<GetElementPtrInst>(R->user_back());
6627 })) {
6628 MaxBitWidth = 8u;
6629
6630 // Determine if the sign bit of all the roots is known to be zero. If not,
6631 // IsKnownPositive is set to False.
6632 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
6633 KnownBits Known = computeKnownBits(R, *DL);
6634 return Known.isNonNegative();
6635 });
6636
6637 // Determine the maximum number of bits required to store the scalar
6638 // values.
6639 for (auto *Scalar : ToDemote) {
6640 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
6641 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
6642 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
6643 }
6644
6645 // If we can't prove that the sign bit is zero, we must add one to the
6646 // maximum bit width to account for the unknown sign bit. This preserves
6647 // the existing sign bit so we can safely sign-extend the root back to the
6648 // original type. Otherwise, if we know the sign bit is zero, we will
6649 // zero-extend the root instead.
6650 //
6651 // FIXME: This is somewhat suboptimal, as there will be cases where adding
6652 // one to the maximum bit width will yield a larger-than-necessary
6653 // type. In general, we need to add an extra bit only if we can't
6654 // prove that the upper bit of the original type is equal to the
6655 // upper bit of the proposed smaller type. If these two bits are the
6656 // same (either zero or one) we know that sign-extending from the
6657 // smaller type will result in the same value. Here, since we can't
6658 // yet prove this, we are just making the proposed smaller type
6659 // larger to ensure correctness.
6660 if (!IsKnownPositive)
6661 ++MaxBitWidth;
6662 }
6663
6664 // Round MaxBitWidth up to the next power-of-two.
6665 if (!isPowerOf2_64(MaxBitWidth))
6666 MaxBitWidth = NextPowerOf2(MaxBitWidth);
6667
6668 // If the maximum bit width we compute is less than the width of the roots'
6669 // type, we can proceed with the narrowing. Otherwise, do nothing.
6670 if (MaxBitWidth >= TreeRootIT->getBitWidth())
6671 return;
6672
6673 // If we can truncate the root, we must collect additional values that might
6674 // be demoted as a result. That is, those seeded by truncations we will
6675 // modify.
6676 while (!Roots.empty())
6677 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
6678
6679 // Finally, map the values we can demote to the maximum bit width we computed.
6680 for (auto *Scalar : ToDemote)
6681 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
6682 }
6683
6684 namespace {
6685
6686 /// The SLPVectorizer Pass.
6687 struct SLPVectorizer : public FunctionPass {
6688 SLPVectorizerPass Impl;
6689
6690 /// Pass identification, replacement for typeid
6691 static char ID;
6692
6693 explicit SLPVectorizer() : FunctionPass(ID) {
6694 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
6695 }
6696
6697 bool doInitialization(Module &M) override {
6698 return false;
6699 }
6700
6701 bool runOnFunction(Function &F) override {
6702 if (skipFunction(F))
6703 return false;
6704
6705 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
6706 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6707 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
6708 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
6709 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6710 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
6711 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6712 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6713 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
6714 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6715
6716 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
6717 }
6718
6719 void getAnalysisUsage(AnalysisUsage &AU) const override {
6720 FunctionPass::getAnalysisUsage(AU);
6721 AU.addRequired<AssumptionCacheTracker>();
6722 AU.addRequired<ScalarEvolutionWrapperPass>();
6723 AU.addRequired<AAResultsWrapperPass>();
6724 AU.addRequired<TargetTransformInfoWrapperPass>();
6725 AU.addRequired<LoopInfoWrapperPass>();
6726 AU.addRequired<DominatorTreeWrapperPass>();
6727 AU.addRequired<DemandedBitsWrapperPass>();
6728 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
6729 AU.addRequired<InjectTLIMappingsLegacy>();
6730 AU.addPreserved<LoopInfoWrapperPass>();
6731 AU.addPreserved<DominatorTreeWrapperPass>();
6732 AU.addPreserved<AAResultsWrapperPass>();
6733 AU.addPreserved<GlobalsAAWrapperPass>();
6734 AU.setPreservesCFG();
6735 }
6736 };
6737
6738 } // end anonymous namespace
6739
6740 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
6741 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
6742 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
6743 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
6744 auto *AA = &AM.getResult<AAManager>(F);
6745 auto *LI = &AM.getResult<LoopAnalysis>(F);
6746 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
6747 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
6748 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
6749 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
6750
6751 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
6752 if (!Changed)
6753 return PreservedAnalyses::all();
6754
6755 PreservedAnalyses PA;
6756 PA.preserveSet<CFGAnalyses>();
6757 return PA;
6758 }
6759
6760 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
6761 TargetTransformInfo *TTI_,
6762 TargetLibraryInfo *TLI_, AAResults *AA_,
6763 LoopInfo *LI_, DominatorTree *DT_,
6764 AssumptionCache *AC_, DemandedBits *DB_,
6765 OptimizationRemarkEmitter *ORE_) {
6766 if (!RunSLPVectorization)
6767 return false;
6768 SE = SE_;
6769 TTI = TTI_;
6770 TLI = TLI_;
6771 AA = AA_;
6772 LI = LI_;
6773 DT = DT_;
6774 AC = AC_;
6775 DB = DB_;
6776 DL = &F.getParent()->getDataLayout();
6777
6778 Stores.clear();
6779 GEPs.clear();
6780 bool Changed = false;
6781
6782 // If the target claims to have no vector registers, don't attempt
6783 // vectorization.
6784 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)))
6785 return false;
6786
6787 // Don't vectorize when the attribute NoImplicitFloat is used.
6788 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
6789 return false;
6790
6791 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
6792
6793 // Use the bottom-up SLP vectorizer to construct chains that start with
6794 // store instructions.
6795 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
6796
6797 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
6798 // delete instructions.
6799
6800 // Update DFS numbers now so that we can use them for ordering.
6801 DT->updateDFSNumbers();
6802
6803 // Scan the blocks in the function in post order.
6804 for (auto BB : post_order(&F.getEntryBlock())) {
6805 collectSeedInstructions(BB);
6806
6807 // Vectorize trees that end at stores.
6808 if (!Stores.empty()) {
6809 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
6810 << " underlying objects.\n");
6811 Changed |= vectorizeStoreChains(R);
6812 }
6813
6814 // Vectorize trees that end at reductions.
6815 Changed |= vectorizeChainsInBlock(BB, R);
6816
6817 // Vectorize the index computations of getelementptr instructions. This
6818 // is primarily intended to catch gather-like idioms ending at
6819 // non-consecutive loads.
6820 if (!GEPs.empty()) {
6821 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
6822 << " underlying objects.\n");
6823 Changed |= vectorizeGEPIndices(BB, R);
6824 }
6825 }
6826
6827 if (Changed) {
6828 R.optimizeGatherSequence();
6829 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
6830 }
6831 return Changed;
6832 }
6833
6834 /// Order may have elements assigned a special value (the vector size) which
6835 /// is out of bounds. Such indices appear only at positions that correspond to
6836 /// undef values (see canReuseExtract for details) and are used to keep undef
6837 /// values from affecting the ordering of the operands.
6838 /// The first loop below simply finds all unused indices and then the next
6839 /// loop nest assigns these indices to the positions of the undef values.
6840 /// As an example below Order has two undef positions and they have assigned
6841 /// values 3 and 7 respectively:
6842 /// before: 6 9 5 4 9 2 1 0
6843 /// after: 6 3 5 4 7 2 1 0
6844 /// \returns Fixed ordering.
6845 static BoUpSLP::OrdersType fixupOrderingIndices(ArrayRef<unsigned> Order) {
6846 BoUpSLP::OrdersType NewOrder(Order.begin(), Order.end());
6847 const unsigned Sz = NewOrder.size();
6848 SmallBitVector UsedIndices(Sz);
6849 SmallVector<int> MaskedIndices;
6850 for (int I = 0, E = NewOrder.size(); I < E; ++I) {
6851 if (NewOrder[I] < Sz)
6852 UsedIndices.set(NewOrder[I]);
6853 else
6854 MaskedIndices.push_back(I);
6855 }
6856 if (MaskedIndices.empty())
6857 return NewOrder;
6858 SmallVector<int> AvailableIndices(MaskedIndices.size());
6859 unsigned Cnt = 0;
6860 int Idx = UsedIndices.find_first_unset();
6861 do {
6862 AvailableIndices[Cnt] = Idx;
6863 Idx = UsedIndices.find_next_unset(Idx);
6864 ++Cnt;
6865 } while (Idx > 0);
6866 assert(Cnt == MaskedIndices.size() && "Non-synced masked/available indices.");
6867 for (int I = 0, E = MaskedIndices.size(); I < E; ++I)
6868 NewOrder[MaskedIndices[I]] = AvailableIndices[I];
6869 return NewOrder;
6870 }
6871
6872 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
6873 unsigned Idx) {
6874 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
6875 << "\n");
6876 const unsigned Sz = R.getVectorElementSize(Chain[0]);
6877 const unsigned MinVF = R.getMinVecRegSize() / Sz;
6878 unsigned VF = Chain.size();
6879
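// For example, with a 128-bit minimum vector register width and 32-bit
// elements, MinVF is 4, so chains of fewer than four stores are rejected.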
6880 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
6881 return false;
6882
6883 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
6884 << "\n");
6885
6886 R.buildTree(Chain);
6887 Optional<ArrayRef<unsigned>> Order = R.bestOrder();
6888 // TODO: Handle orders of size less than number of elements in the vector.
6889 if (Order && Order->size() == Chain.size()) {
6890 // TODO: reorder tree nodes without tree rebuilding.
6891 SmallVector<Value *, 4> ReorderedOps(Chain.size());
6892 transform(fixupOrderingIndices(*Order), ReorderedOps.begin(),
6893 [Chain](const unsigned Idx) { return Chain[Idx]; });
6894 R.buildTree(ReorderedOps);
6895 }
6896 if (R.isTreeTinyAndNotFullyVectorizable())
6897 return false;
6898 if (R.isLoadCombineCandidate())
6899 return false;
6900
6901 R.computeMinimumValueSizes();
6902
6903 InstructionCost Cost = R.getTreeCost();
6904
6905 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF = " << VF << "\n");
6906 if (Cost < -SLPCostThreshold) {
6907 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
6908
6909 using namespace ore;
6910
6911 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
6912 cast<StoreInst>(Chain[0]))
6913 << "Stores SLP vectorized with cost " << NV("Cost", Cost)
6914 << " and with tree size "
6915 << NV("TreeSize", R.getTreeSize()));
6916
6917 R.vectorizeTree();
6918 return true;
6919 }
6920
6921 return false;
6922 }
6923
6924 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
6925 BoUpSLP &R) {
6926 // We may run into multiple chains that merge into a single chain. We mark the
6927 // stores that we vectorized so that we don't visit the same store twice.
6928 BoUpSLP::ValueSet VectorizedStores;
6929 bool Changed = false;
6930
6931 int E = Stores.size();
6932 SmallBitVector Tails(E, false);
6933 int MaxIter = MaxStoreLookup.getValue();
6934 SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
6935 E, std::make_pair(E, INT_MAX));
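// ConsecutiveChain[K] == (Idx, 1) means that Stores[Idx] immediately follows
// Stores[K] in memory; the initial value (E, INT_MAX) means no known successor.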
6936 SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
6937 int IterCnt;
6938 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
6939 &CheckedPairs,
6940 &ConsecutiveChain](int K, int Idx) {
6941 if (IterCnt >= MaxIter)
6942 return true;
6943 if (CheckedPairs[Idx].test(K))
6944 return ConsecutiveChain[K].second == 1 &&
6945 ConsecutiveChain[K].first == Idx;
6946 ++IterCnt;
6947 CheckedPairs[Idx].set(K);
6948 CheckedPairs[K].set(Idx);
6949 Optional<int> Diff = getPointersDiff(
6950 Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
6951 Stores[Idx]->getValueOperand()->getType(),
6952 Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true);
6953 if (!Diff || *Diff == 0)
6954 return false;
6955 int Val = *Diff;
6956 if (Val < 0) {
6957 if (ConsecutiveChain[Idx].second > -Val) {
6958 Tails.set(K);
6959 ConsecutiveChain[Idx] = std::make_pair(K, -Val);
6960 }
6961 return false;
6962 }
6963 if (ConsecutiveChain[K].second <= Val)
6964 return false;
6965
6966 Tails.set(Idx);
6967 ConsecutiveChain[K] = std::make_pair(Idx, Val);
6968 return Val == 1;
6969 };
6970 // Do a quadratic search on all of the given stores in reverse order and find
6971 // all of the pairs of stores that follow each other.
6972 for (int Idx = E - 1; Idx >= 0; --Idx) {
6973 // If a store has multiple consecutive store candidates, search according
6974 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
6975 // This is because pairing with an immediately succeeding or preceding
6976 // candidate usually creates the best chance to find an SLP vectorization opportunity.
6977 const int MaxLookDepth = std::max(E - Idx, Idx + 1);
6978 IterCnt = 0;
6979 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
6980 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
6981 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
6982 break;
6983 }
6984
6985 // Tracks if we tried to vectorize stores starting from the given tail
6986 // already.
6987 SmallBitVector TriedTails(E, false);
6988 // For stores that start but don't end a link in the chain:
6989 for (int Cnt = E; Cnt > 0; --Cnt) {
6990 int I = Cnt - 1;
6991 if (ConsecutiveChain[I].first == E || Tails.test(I))
6992 continue;
6993 // We found a store instr that starts a chain. Now follow the chain and try
6994 // to vectorize it.
6995 BoUpSLP::ValueList Operands;
6996 // Collect the chain into a list.
6997 while (I != E && !VectorizedStores.count(Stores[I])) {
6998 Operands.push_back(Stores[I]);
6999 Tails.set(I);
7000 if (ConsecutiveChain[I].second != 1) {
7001 // Mark the new end in the chain and go back, if required. It might be
7002 // required if the original stores come in reversed order, for example.
7003 if (ConsecutiveChain[I].first != E &&
7004 Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
7005 !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
7006 TriedTails.set(I);
7007 Tails.reset(ConsecutiveChain[I].first);
7008 if (Cnt < ConsecutiveChain[I].first + 2)
7009 Cnt = ConsecutiveChain[I].first + 2;
7010 }
7011 break;
7012 }
7013 // Move to the next value in the chain.
7014 I = ConsecutiveChain[I].first;
7015 }
7016 assert(!Operands.empty() && "Expected non-empty list of stores.");
7017
7018 unsigned MaxVecRegSize = R.getMaxVecRegSize();
7019 unsigned EltSize = R.getVectorElementSize(Operands[0]);
7020 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize);
7021
7022 unsigned MinVF = std::max(2U, R.getMinVecRegSize() / EltSize);
7023 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store),
7024 MaxElts);
7025
7026 // FIXME: Is division-by-2 the correct step? Should we assert that the
7027 // register size is a power-of-2?
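// For example, with MaxVF == 8 and MinVF == 4 we first try slices of eight
// consecutive stores and only then fall back to slices of four.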
7028 unsigned StartIdx = 0;
7029 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
7030 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
7031 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size);
7032 if (!VectorizedStores.count(Slice.front()) &&
7033 !VectorizedStores.count(Slice.back()) &&
7034 vectorizeStoreChain(Slice, R, Cnt)) {
7035 // Mark the vectorized stores so that we don't vectorize them again.
7036 VectorizedStores.insert(Slice.begin(), Slice.end());
7037 Changed = true;
7038 // If we vectorized the initial block, there is no need to try to
7039 // vectorize it again.
7040 if (Cnt == StartIdx)
7041 StartIdx += Size;
7042 Cnt += Size;
7043 continue;
7044 }
7045 ++Cnt;
7046 }
7047 // Check if the whole array was vectorized already - exit.
7048 if (StartIdx >= Operands.size())
7049 break;
7050 }
7051 }
7052
7053 return Changed;
7054 }
7055
7056 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
7057 // Initialize the collections. We will make a single pass over the block.
7058 Stores.clear();
7059 GEPs.clear();
7060
7061 // Visit the store and getelementptr instructions in BB and organize them in
7062 // Stores and GEPs according to the underlying objects of their pointer
7063 // operands.
7064 for (Instruction &I : *BB) {
7065 // Ignore store instructions that are volatile or have a pointer operand
7066 // that doesn't point to a scalar type.
7067 if (auto *SI = dyn_cast<StoreInst>(&I)) {
7068 if (!SI->isSimple())
7069 continue;
7070 if (!isValidElementType(SI->getValueOperand()->getType()))
7071 continue;
7072 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
7073 }
7074
7075 // Ignore getelementptr instructions that have more than one index, a
7076 // constant index, or a pointer operand that doesn't point to a scalar
7077 // type.
7078 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
7079 auto Idx = GEP->idx_begin()->get();
7080 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
7081 continue;
7082 if (!isValidElementType(Idx->getType()))
7083 continue;
7084 if (GEP->getType()->isVectorTy())
7085 continue;
7086 GEPs[GEP->getPointerOperand()].push_back(GEP);
7087 }
7088 }
7089 }
7090
7091 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
7092 if (!A || !B)
7093 return false;
7094 Value *VL[] = {A, B};
7095 return tryToVectorizeList(VL, R, /*AllowReorder=*/true);
7096 }
7097
7098 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
7099 bool AllowReorder) {
7100 if (VL.size() < 2)
7101 return false;
7102
7103 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
7104 << VL.size() << ".\n");
7105
7106 // Check that all of the parts are instructions of the same type,
7107 // we permit an alternate opcode via InstructionsState.
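// For example, {fadd, fsub, fadd, fsub} is accepted as a single bundle with
// main opcode fadd and alternate opcode fsub.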
7108 InstructionsState S = getSameOpcode(VL);
7109 if (!S.getOpcode())
7110 return false;
7111
7112 Instruction *I0 = cast<Instruction>(S.OpValue);
7113 // Make sure invalid types (including vector type) are rejected before
7114 // determining vectorization factor for scalar instructions.
7115 for (Value *V : VL) {
7116 Type *Ty = V->getType();
7117 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
7118 // NOTE: the following will give the user an internal LLVM type name,
7119 // which may not be useful.
7120 R.getORE()->emit([&]() {
7121 std::string type_str;
7122 llvm::raw_string_ostream rso(type_str);
7123 Ty->print(rso);
7124 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
7125 << "Cannot SLP vectorize list: type "
7126 << rso.str() + " is unsupported by vectorizer";
7127 });
7128 return false;
7129 }
7130 }
7131
7132 unsigned Sz = R.getVectorElementSize(I0);
7133 unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
7134 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
7135 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
7136 if (MaxVF < 2) {
7137 R.getORE()->emit([&]() {
7138 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
7139 << "Cannot SLP vectorize list: vectorization factor "
7140 << "less than 2 is not supported";
7141 });
7142 return false;
7143 }
7144
7145 bool Changed = false;
7146 bool CandidateFound = false;
7147 InstructionCost MinCost = SLPCostThreshold.getValue();
7148 Type *ScalarTy = VL[0]->getType();
7149 if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
7150 ScalarTy = IE->getOperand(1)->getType();
7151
7152 unsigned NextInst = 0, MaxInst = VL.size();
7153 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
7154 // No actual vectorization should happen if the number of parts is the same
7155 // as the provided vectorization factor (i.e. the scalar type is used for
7156 // vector code during codegen).
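// For example, if the target would split <8 x i64> into eight scalar parts,
// getNumberOfParts returns 8 == VF and this vectorization factor is skipped.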
7157 auto *VecTy = FixedVectorType::get(ScalarTy, VF);
7158 if (TTI->getNumberOfParts(VecTy) == VF)
7159 continue;
7160 for (unsigned I = NextInst; I < MaxInst; ++I) {
7161 unsigned OpsWidth = 0;
7162
7163 if (I + VF > MaxInst)
7164 OpsWidth = MaxInst - I;
7165 else
7166 OpsWidth = VF;
7167
7168 if (!isPowerOf2_32(OpsWidth))
7169 continue;
7170
7171 if ((VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2))
7172 break;
7173
7174 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
7175 // Check that a previous iteration of this loop did not delete the Value.
7176 if (llvm::any_of(Ops, [&R](Value *V) {
7177 auto *I = dyn_cast<Instruction>(V);
7178 return I && R.isDeleted(I);
7179 }))
7180 continue;
7181
7182 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
7183 << "\n");
7184
7185 R.buildTree(Ops);
7186 if (AllowReorder) {
7187 Optional<ArrayRef<unsigned>> Order = R.bestOrder();
7188 if (Order) {
7189 // TODO: reorder tree nodes without tree rebuilding.
7190 SmallVector<Value *, 4> ReorderedOps(Ops.size());
7191 transform(fixupOrderingIndices(*Order), ReorderedOps.begin(),
7192 [Ops](const unsigned Idx) { return Ops[Idx]; });
7193 R.buildTree(ReorderedOps);
7194 }
7195 }
7196 if (R.isTreeTinyAndNotFullyVectorizable())
7197 continue;
7198
7199 R.computeMinimumValueSizes();
7200 InstructionCost Cost = R.getTreeCost();
7201 CandidateFound = true;
7202 MinCost = std::min(MinCost, Cost);
7203
7204 if (Cost < -SLPCostThreshold) {
7205 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
7206 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
7207 cast<Instruction>(Ops[0]))
7208 << "SLP vectorized with cost " << ore::NV("Cost", Cost)
7209 << " and with tree size "
7210 << ore::NV("TreeSize", R.getTreeSize()));
7211
7212 R.vectorizeTree();
7213 // Move to the next bundle.
7214 I += VF - 1;
7215 NextInst = I + 1;
7216 Changed = true;
7217 }
7218 }
7219 }
7220
7221 if (!Changed && CandidateFound) {
7222 R.getORE()->emit([&]() {
7223 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
7224 << "List vectorization was possible but not beneficial with cost "
7225 << ore::NV("Cost", MinCost) << " >= "
7226 << ore::NV("Treshold", -SLPCostThreshold);
7227 });
7228 } else if (!Changed) {
7229 R.getORE()->emit([&]() {
7230 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
7231 << "Cannot SLP vectorize list: vectorization was impossible"
7232 << " with available vectorization factors";
7233 });
7234 }
7235 return Changed;
7236 }
7237
tryToVectorize(Instruction * I,BoUpSLP & R)7238 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
7239 if (!I)
7240 return false;
7241
7242 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
7243 return false;
7244
7245 Value *P = I->getParent();
7246
7247 // Vectorize in current basic block only.
7248 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
7249 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
7250 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
7251 return false;
7252
7253 // Try to vectorize V.
7254 if (tryToVectorizePair(Op0, Op1, R))
7255 return true;
7256
7257 auto *A = dyn_cast<BinaryOperator>(Op0);
7258 auto *B = dyn_cast<BinaryOperator>(Op1);
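  // If the (Op0, Op1) pair itself did not vectorize above, try pairing one
  // operand with a child of the other. Illustrative sketch: for
  // I = A + (B0 + B1), the pairs (A, B0) and (A, B1) are considered below.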
  // Try to skip B.
  if (B && B->hasOneUse()) {
    auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
      return true;
    if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
      return true;
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
      return true;
    if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
      return true;
  }
  return false;
}

namespace {

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction instructions that has values
/// that can be put into a vector as its leaves. For example:
///
/// mul mul mul mul
///  \  /    \  /
///   +       +
///    \     /
///       +
/// This tree has "mul" as its leaf values and "+" as its reduction
/// instructions. A reduction can feed into a store or a binary operation
/// feeding a phi.
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///    ...
///    \  /
///     +
///     |
///   *p =
///
class HorizontalReduction {
  using ReductionOpsType = SmallVector<Value *, 16>;
  using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
  ReductionOpsListType ReductionOps;
  SmallVector<Value *, 32> ReducedVals;
  // Use a MapVector to get deterministic iteration order (stable output).
  MapVector<Instruction *, Value *> ExtraArgs;
  WeakTrackingVH ReductionRoot;
  /// The type of reduction operation.
  RecurKind RdxKind;

  const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max();

  static bool isCmpSelMinMax(Instruction *I) {
    return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) &&
           RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I));
  }

  // And/or are potentially poison-safe logical patterns like:
  // select x, y, false
  // select x, true, y
  static bool isBoolLogicOp(Instruction *I) {
    return match(I, m_LogicalAnd(m_Value(), m_Value())) ||
           match(I, m_LogicalOr(m_Value(), m_Value()));
  }

  /// Checks if instruction is associative and can be vectorized.
  static bool isVectorizable(RecurKind Kind, Instruction *I) {
    if (Kind == RecurKind::None)
      return false;

    // Integer ops that map to select instructions or intrinsics are fine.
    if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) ||
        isBoolLogicOp(I))
      return true;

    if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
      // FP min/max are associative except for NaN and -0.0. We do not
      // have to rule out -0.0 here because the intrinsic semantics do not
      // specify a fixed result for it.
      return I->getFastMathFlags().noNaNs();
    }

    return I->isAssociative();
  }

  static Value *getRdxOperand(Instruction *I, unsigned Index) {
    // Poison-safe 'or' takes the form: select X, true, Y
    // To make that work with the normal operand processing, we skip the
    // true value operand.
    // TODO: Change the code and data structures to handle this without a hack.
    if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1)
      return I->getOperand(2);
    return I->getOperand(Index);
  }

  /// Checks if the ParentStackElem.first should be marked as a reduction
  /// operation with an extra argument or as an extra argument itself.
  void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
                    Value *ExtraArg) {
    if (ExtraArgs.count(ParentStackElem.first)) {
      ExtraArgs[ParentStackElem.first] = nullptr;
      // We ran into something like:
      // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
      // The whole ParentStackElem.first should be considered as an extra value
      // in this case.
      // Do not perform analysis of remaining operands of ParentStackElem.first
      // instruction; this whole instruction is an extra argument.
      ParentStackElem.second = INVALID_OPERAND_INDEX;
    } else {
      // We ran into something like:
      // ParentStackElem.first += ... + ExtraArg + ...
      ExtraArgs[ParentStackElem.first] = ExtraArg;
    }
  }

  /// Creates reduction operation with the current opcode.
  static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS,
                         Value *RHS, const Twine &Name, bool UseSelect) {
    unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
    switch (Kind) {
    case RecurKind::Add:
    case RecurKind::Mul:
    case RecurKind::Or:
    case RecurKind::And:
    case RecurKind::Xor:
    case RecurKind::FAdd:
    case RecurKind::FMul:
      return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
                                 Name);
    case RecurKind::FMax:
      return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
    case RecurKind::FMin:
      return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
    case RecurKind::SMax:
      if (UseSelect) {
        Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
        return Builder.CreateSelect(Cmp, LHS, RHS, Name);
      }
      return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
    case RecurKind::SMin:
      if (UseSelect) {
        Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
        return Builder.CreateSelect(Cmp, LHS, RHS, Name);
      }
      return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
    case RecurKind::UMax:
      if (UseSelect) {
        Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
        return Builder.CreateSelect(Cmp, LHS, RHS, Name);
      }
      return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
    case RecurKind::UMin:
      if (UseSelect) {
        Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
        return Builder.CreateSelect(Cmp, LHS, RHS, Name);
      }
      return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
    default:
      llvm_unreachable("Unknown reduction operation.");
    }
  }
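
  // As an illustrative sketch of the UseSelect form above, an SMax step is
  // emitted as the two-instruction idiom:
  //   %cmp = icmp sgt i32 %lhs, %rhs
  //   %max = select i1 %cmp, i32 %lhs, i32 %rhs
  // while the intrinsic form is a single call to llvm.smax.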

  /// Creates reduction operation with the current opcode with the IR flags
  /// from \p ReductionOps.
  static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
                         Value *RHS, const Twine &Name,
                         const ReductionOpsListType &ReductionOps) {
    bool UseSelect = ReductionOps.size() == 2;
    assert((!UseSelect || isa<SelectInst>(ReductionOps[1][0])) &&
           "Expected cmp + select pairs for reduction");
    Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
    if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
      if (auto *Sel = dyn_cast<SelectInst>(Op)) {
        propagateIRFlags(Sel->getCondition(), ReductionOps[0]);
        propagateIRFlags(Op, ReductionOps[1]);
        return Op;
      }
    }
    propagateIRFlags(Op, ReductionOps[0]);
    return Op;
  }

  /// Creates reduction operation with the current opcode with the IR flags
  /// from \p I.
  static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
                         Value *RHS, const Twine &Name, Instruction *I) {
    auto *SelI = dyn_cast<SelectInst>(I);
    Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr);
    if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
      if (auto *Sel = dyn_cast<SelectInst>(Op))
        propagateIRFlags(Sel->getCondition(), SelI->getCondition());
    }
    propagateIRFlags(Op, I);
    return Op;
  }

  static RecurKind getRdxKind(Instruction *I) {
    assert(I && "Expected instruction for reduction matching");
    if (match(I, m_Add(m_Value(), m_Value())))
      return RecurKind::Add;
    if (match(I, m_Mul(m_Value(), m_Value())))
      return RecurKind::Mul;
    if (match(I, m_And(m_Value(), m_Value())) ||
        match(I, m_LogicalAnd(m_Value(), m_Value())))
      return RecurKind::And;
    if (match(I, m_Or(m_Value(), m_Value())) ||
        match(I, m_LogicalOr(m_Value(), m_Value())))
      return RecurKind::Or;
    if (match(I, m_Xor(m_Value(), m_Value())))
      return RecurKind::Xor;
    if (match(I, m_FAdd(m_Value(), m_Value())))
      return RecurKind::FAdd;
    if (match(I, m_FMul(m_Value(), m_Value())))
      return RecurKind::FMul;

    if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
      return RecurKind::FMax;
    if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
      return RecurKind::FMin;

    // This matches either cmp+select or intrinsics. SLP is expected to handle
    // either form.
    // TODO: If we are canonicalizing to intrinsics, we can remove several
    // special-case paths that deal with selects.
    if (match(I, m_SMax(m_Value(), m_Value())))
      return RecurKind::SMax;
    if (match(I, m_SMin(m_Value(), m_Value())))
      return RecurKind::SMin;
    if (match(I, m_UMax(m_Value(), m_Value())))
      return RecurKind::UMax;
    if (match(I, m_UMin(m_Value(), m_Value())))
      return RecurKind::UMin;

    if (auto *Select = dyn_cast<SelectInst>(I)) {
      // Try harder: look for min/max patterns based on instructions producing
      // the same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
      // During the intermediate stages of SLP, it's very common to have
      // patterns like this (since optimizeGatherSequence is run only once
      // at the end):
      // %1 = extractelement <2 x i32> %a, i32 0
      // %2 = extractelement <2 x i32> %a, i32 1
      // %cond = icmp sgt i32 %1, %2
      // %3 = extractelement <2 x i32> %a, i32 0
      // %4 = extractelement <2 x i32> %a, i32 1
      // %select = select i1 %cond, i32 %3, i32 %4
      CmpInst::Predicate Pred;
      Instruction *L1;
      Instruction *L2;

      Value *LHS = Select->getTrueValue();
      Value *RHS = Select->getFalseValue();
      Value *Cond = Select->getCondition();

      // TODO: Support inverse predicates.
      if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
        if (!isa<ExtractElementInst>(RHS) ||
            !L2->isIdenticalTo(cast<Instruction>(RHS)))
          return RecurKind::None;
      } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
        if (!isa<ExtractElementInst>(LHS) ||
            !L1->isIdenticalTo(cast<Instruction>(LHS)))
          return RecurKind::None;
      } else {
        if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
          return RecurKind::None;
        if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
            !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
            !L2->isIdenticalTo(cast<Instruction>(RHS)))
          return RecurKind::None;
      }

      switch (Pred) {
      default:
        return RecurKind::None;
      case CmpInst::ICMP_SGT:
      case CmpInst::ICMP_SGE:
        return RecurKind::SMax;
      case CmpInst::ICMP_SLT:
      case CmpInst::ICMP_SLE:
        return RecurKind::SMin;
      case CmpInst::ICMP_UGT:
      case CmpInst::ICMP_UGE:
        return RecurKind::UMax;
      case CmpInst::ICMP_ULT:
      case CmpInst::ICMP_ULE:
        return RecurKind::UMin;
      }
    }
    return RecurKind::None;
  }

  /// Get the index of the first operand.
  static unsigned getFirstOperandIndex(Instruction *I) {
    return isCmpSelMinMax(I) ? 1 : 0;
  }

  /// Total number of operands in the reduction operation.
  static unsigned getNumberOfOperands(Instruction *I) {
    return isCmpSelMinMax(I) ? 3 : 2;
  }

  /// Checks if the instruction is in basic block \p BB.
  /// For a cmp+sel min/max reduction check that both ops are in \p BB.
  static bool hasSameParent(Instruction *I, BasicBlock *BB) {
    if (isCmpSelMinMax(I)) {
      auto *Sel = cast<SelectInst>(I);
      auto *Cmp = cast<Instruction>(Sel->getCondition());
      return Sel->getParent() == BB && Cmp->getParent() == BB;
    }
    return I->getParent() == BB;
  }

  /// Expected number of uses for reduction operations/reduced values.
  static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) {
    if (IsCmpSelMinMax) {
      // The SelectInst must be used twice, while the condition op must have a
      // single use only.
      if (auto *Sel = dyn_cast<SelectInst>(I))
        return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
      return I->hasNUses(2);
    }

    // An arithmetic reduction operation must be used once only.
    return I->hasOneUse();
  }
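
  // Illustrative use counts for a middle step of a cmp+select min/max
  // reduction chain (assumed shape):
  //   %c = icmp sgt i32 %s0, %x            ; single use, by %s1
  //   %s1 = select i1 %c, i32 %s0, i32 %x  ; two uses, by the next cmp+select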

  /// Initializes the list of reduction operations.
  void initReductionOps(Instruction *I) {
    if (isCmpSelMinMax(I))
      ReductionOps.assign(2, ReductionOpsType());
    else
      ReductionOps.assign(1, ReductionOpsType());
  }

  /// Add all reduction operations for the reduction instruction \p I.
  void addReductionOps(Instruction *I) {
    if (isCmpSelMinMax(I)) {
      ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
      ReductionOps[1].emplace_back(I);
    } else {
      ReductionOps[0].emplace_back(I);
    }
  }

  static Value *getLHS(RecurKind Kind, Instruction *I) {
    if (Kind == RecurKind::None)
      return nullptr;
    return I->getOperand(getFirstOperandIndex(I));
  }
  static Value *getRHS(RecurKind Kind, Instruction *I) {
    if (Kind == RecurKind::None)
      return nullptr;
    return I->getOperand(getFirstOperandIndex(I) + 1);
  }

public:
  HorizontalReduction() = default;

  /// Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) {
    assert((!Phi || is_contained(Phi->operands(), Inst)) &&
           "Phi needs to use the binary operator");
    assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) ||
            isa<IntrinsicInst>(Inst)) &&
           "Expected binop, select, or intrinsic for reduction matching");
    RdxKind = getRdxKind(Inst);

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (getLHS(RdxKind, Inst) == Phi) {
        Phi = nullptr;
        Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst));
        if (!Inst)
          return false;
        RdxKind = getRdxKind(Inst);
      } else if (getRHS(RdxKind, Inst) == Phi) {
        Phi = nullptr;
        Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst));
        if (!Inst)
          return false;
        RdxKind = getRdxKind(Inst);
      }
    }

    if (!isVectorizable(RdxKind, Inst))
      return false;

    // Analyze "regular" integer/FP types for reductions - no target-specific
    // types or pointers.
    Type *Ty = Inst->getType();
    if (!isValidElementType(Ty) || Ty->isPointerTy())
      return false;

    // Though the ultimate reduction may have multiple uses, its condition must
    // have only a single use.
    if (auto *Sel = dyn_cast<SelectInst>(Inst))
      if (!Sel->getCondition()->hasOneUse())
        return false;

    ReductionRoot = Inst;

    // The opcode for leaf values that we perform a reduction on.
    // For example: load(x) + load(y) + load(z) + fptoui(w)
    // The leaf opcode for 'w' does not match, so we don't include it as a
    // potential candidate for the reduction.
    unsigned LeafOpcode = 0;

    // Post-order traverse the reduction tree starting at Inst. We only handle
    // true trees containing binary operators or selects.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst)));
    initReductionOps(Inst);
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      const RecurKind TreeRdxKind = getRdxKind(TreeN);
      bool IsReducedValue = TreeRdxKind != RdxKind;

      // Postorder visit.
      if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) {
        if (IsReducedValue)
          ReducedVals.push_back(TreeN);
        else {
          auto ExtraArgsIter = ExtraArgs.find(TreeN);
          if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) {
            // Check if TreeN is an extra argument of its parent operation.
            if (Stack.size() <= 1) {
              // TreeN can't be an extra argument as it is a root reduction
              // operation.
              return false;
            }
            // Yes, TreeN is an extra argument, do not add it to the list of
            // reduction operations.
            // Stack[Stack.size() - 2] always points to the parent operation.
            markExtraArg(Stack[Stack.size() - 2], TreeN);
            ExtraArgs.erase(TreeN);
          } else
            addReductionOps(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit operands.
      Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit);
      auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
      if (!EdgeInst) {
        // Edge value is not a reduction instruction or a leaf instruction.
        // (It may be a constant, function argument, or something else.)
        markExtraArg(Stack.back(), EdgeVal);
        continue;
      }
      RecurKind EdgeRdxKind = getRdxKind(EdgeInst);
      // Continue analysis if the next operand is a reduction operation or
      // (possibly) a leaf value. If the leaf value opcode is not set yet, the
      // first operation encountered that is not a reduction operation is
      // taken as the leaf opcode.
      // Only handle trees in the current basic block.
      // Each tree node needs to have the minimal number of users, except for
      // the ultimate reduction.
      const bool IsRdxInst = EdgeRdxKind == RdxKind;
      if (EdgeInst != Phi && EdgeInst != Inst &&
          hasSameParent(EdgeInst, Inst->getParent()) &&
          hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) &&
          (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) {
        if (IsRdxInst) {
          // We need to be able to reassociate the reduction operations.
          if (!isVectorizable(EdgeRdxKind, EdgeInst)) {
            // EdgeInst is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), EdgeInst);
            continue;
          }
        } else if (!LeafOpcode) {
          LeafOpcode = EdgeInst->getOpcode();
        }
        Stack.push_back(
            std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst)));
        continue;
      }
      // EdgeInst is an extra argument for TreeN (its parent operation).
      markExtraArg(Stack.back(), EdgeInst);
    }
    return true;
  }

  /// Attempt to vectorize the tree found by matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    // If there are a sufficient number of reduction values, reduce
    // to a nearby power-of-2. We can safely generate oversized
    // vectors and rely on the backend to split them to legal sizes.
    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < 4)
      return false;

    // Intersect the fast-math-flags from all reduction operations.
    FastMathFlags RdxFMF;
    RdxFMF.set();
    for (ReductionOpsType &RdxOp : ReductionOps) {
      for (Value *RdxVal : RdxOp) {
        if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal))
          RdxFMF &= FPMO->getFastMathFlags();
      }
    }
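
    // Illustrative sketch: if one fadd in the tree is 'fast' while another
    // carries only 'reassoc', the intersection keeps just 'reassoc', and the
    // emitted reduction instructions use that common subset of flags.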

    IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
    Builder.setFastMathFlags(RdxFMF);

    BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
    // The same extra argument may be used several times, so log each attempt
    // to use it.
    for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
      assert(Pair.first && "DebugLoc must be set.");
      ExternallyUsedValues[Pair.second].push_back(Pair.first);
    }

    // The compare instruction of a min/max is the insertion point for new
    // instructions and may be replaced with a new compare instruction.
    auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
      assert(isa<SelectInst>(RdxRootInst) &&
             "Expected min/max reduction to have select root instruction");
      Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
      assert(isa<Instruction>(ScalarCond) &&
             "Expected min/max reduction to have compare condition");
      return cast<Instruction>(ScalarCond);
    };

    // The reduction root is used as the insertion point for new instructions,
    // so set it as externally used to prevent it from being deleted.
    ExternallyUsedValues[ReductionRoot];
    SmallVector<Value *, 16> IgnoreList;
    for (ReductionOpsType &RdxOp : ReductionOps)
      IgnoreList.append(RdxOp.begin(), RdxOp.end());

    unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
    if (NumReducedVals > ReduxWidth) {
      // In the loop below, we are building a tree based on a window of
      // 'ReduxWidth' values.
      // If the operands of those values have common traits (compare predicate,
      // constant operand, etc), then we want to group those together to
      // minimize the cost of the reduction.

      // TODO: This should be extended to count common operands for
      // compares and binops.

      // Step 1: Count the number of times each compare predicate occurs.
      SmallDenseMap<unsigned, unsigned> PredCountMap;
      for (Value *RdxVal : ReducedVals) {
        CmpInst::Predicate Pred;
        if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value())))
          ++PredCountMap[Pred];
      }
      // Step 2: Sort the values so the most common predicates come first.
      stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) {
        CmpInst::Predicate PredA, PredB;
        if (match(A, m_Cmp(PredA, m_Value(), m_Value())) &&
            match(B, m_Cmp(PredB, m_Value(), m_Value()))) {
          return PredCountMap[PredA] > PredCountMap[PredB];
        }
        return false;
      });
    }
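
    // Illustrative sketch: for ReducedVals of compares with predicates
    // [sgt, ult, sgt, sgt, ult], the sort yields [sgt, sgt, sgt, ult, ult],
    // so a power-of-2 window is more likely to see a single predicate.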

    Value *VectorizedTree = nullptr;
    unsigned i = 0;
    while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
      ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth);
      V.buildTree(VL, ExternallyUsedValues, IgnoreList);
      Optional<ArrayRef<unsigned>> Order = V.bestOrder();
      if (Order) {
        assert(Order->size() == VL.size() &&
               "Order size must be the same as number of vectorized "
               "instructions.");
        // TODO: reorder tree nodes without tree rebuilding.
        SmallVector<Value *, 4> ReorderedOps(VL.size());
        transform(fixupOrderingIndices(*Order), ReorderedOps.begin(),
                  [VL](const unsigned Idx) { return VL[Idx]; });
        V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList);
      }
      if (V.isTreeTinyAndNotFullyVectorizable())
        break;
      if (V.isLoadCombineReductionCandidate(RdxKind))
        break;

      // For a poison-safe boolean logic reduction, do not replace select
      // instructions with logic ops. All reduced values will be frozen (see
      // below) to prevent leaking poison.
      if (isa<SelectInst>(ReductionRoot) &&
          isBoolLogicOp(cast<Instruction>(ReductionRoot)) &&
          NumReducedVals != ReduxWidth)
        break;

      V.computeMinimumValueSizes();

      // Estimate cost.
      InstructionCost TreeCost =
          V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth));
      InstructionCost ReductionCost =
          getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF);
      InstructionCost Cost = TreeCost + ReductionCost;
      if (!Cost.isValid()) {
        LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
        return false;
      }
      if (Cost >= -SLPCostThreshold) {
        V.getORE()->emit([&]() {
          return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
                                          cast<Instruction>(VL[0]))
                 << "Vectorizing horizontal reduction is possible "
                 << "but not beneficial with cost " << ore::NV("Cost", Cost)
                 << " and threshold "
                 << ore::NV("Threshold", -SLPCostThreshold);
        });
        break;
      }

      LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
                        << Cost << ". (HorRdx)\n");
      V.getORE()->emit([&]() {
        return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
                                  cast<Instruction>(VL[0]))
               << "Vectorized horizontal reduction with cost "
               << ore::NV("Cost", Cost) << " and with tree size "
               << ore::NV("TreeSize", V.getTreeSize());
      });

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);

      // Emit a reduction. If the root is a select (min/max idiom), the insert
      // point is the compare condition of that select.
      Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
      if (isCmpSelMinMax(RdxRootInst))
        Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
      else
        Builder.SetInsertPoint(RdxRootInst);

      // To prevent poison from leaking across what used to be sequential,
      // safe, scalar boolean logic operations, the reduction operand must be
      // frozen.
      if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst))
        VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);

      Value *ReducedSubTree =
          emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);

      if (!VectorizedTree) {
        // Initialize the final value in the reduction.
        VectorizedTree = ReducedSubTree;
      } else {
        // Update the final value in the reduction.
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
                                  ReducedSubTree, "op.rdx", ReductionOps);
      }
      i += ReduxWidth;
      ReduxWidth = PowerOf2Floor(NumReducedVals - i);
    }
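
    // Illustrative sketch (assuming the vector step above was profitable):
    // with 7 reduced values, one width-4 vector step is emitted by the loop,
    // and the remaining 3 values are folded into VectorizedTree one at a time
    // by the scalar epilogue below.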

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        auto *I = cast<Instruction>(ReducedVals[i]);
        Builder.SetCurrentDebugLocation(I->getDebugLoc());
        VectorizedTree =
            createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps);
      }
      for (auto &Pair : ExternallyUsedValues) {
        // Add each externally used value to the final reduction.
        for (auto *I : Pair.second) {
          Builder.SetCurrentDebugLocation(I->getDebugLoc());
          VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
                                    Pair.first, "op.extra", I);
        }
      }

      ReductionRoot->replaceAllUsesWith(VectorizedTree);

      // Mark all scalar reduction ops for deletion, they are replaced by the
      // vector reductions.
      V.eraseInstructions(IgnoreList);
    }
    return VectorizedTree != nullptr;
  }

  unsigned numReductionValues() const { return ReducedVals.size(); }

private:
  /// Calculate the cost of a reduction.
  InstructionCost getReductionCost(TargetTransformInfo *TTI,
                                   Value *FirstReducedVal, unsigned ReduxWidth,
                                   FastMathFlags FMF) {
    Type *ScalarTy = FirstReducedVal->getType();
    FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth);
    InstructionCost VectorCost, ScalarCost;
    switch (RdxKind) {
    case RecurKind::Add:
    case RecurKind::Mul:
    case RecurKind::Or:
    case RecurKind::And:
    case RecurKind::Xor:
    case RecurKind::FAdd:
    case RecurKind::FMul: {
      unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
      VectorCost = TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF);
      ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy);
      break;
    }
    case RecurKind::FMax:
    case RecurKind::FMin: {
      auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
      VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
                                               /*unsigned=*/false);
      ScalarCost =
          TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy) +
          TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
                                  CmpInst::makeCmpResultType(ScalarTy));
      break;
    }
    case RecurKind::SMax:
    case RecurKind::SMin:
    case RecurKind::UMax:
    case RecurKind::UMin: {
      auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
      bool IsUnsigned =
          RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin;
      VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned);
      ScalarCost =
          TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy) +
          TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
                                  CmpInst::makeCmpResultType(ScalarTy));
      break;
    }
    default:
      llvm_unreachable("Expected arithmetic or min/max reduction operation");
    }

    // Scalar cost is repeated for N-1 elements.
    ScalarCost *= (ReduxWidth - 1);
    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
                      << " for reduction that starts with " << *FirstReducedVal
                      << " (It is a splitting reduction)\n");
    return VectorCost - ScalarCost;
  }
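
  // Illustrative sketch: for an i32 'add' reduction of width 8, the result is
  // the target's cost of reducing <8 x i32> minus seven times the scalar add
  // cost, since vectorization replaces those seven scalar reduction steps.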

  /// Emit a horizontal reduction of the vectorized value.
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
                       unsigned ReduxWidth, const TargetTransformInfo *TTI) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind,
                                       ReductionOps.back());
  }
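
  // The helper above typically materializes the reduction as a single
  // intrinsic call, e.g. (illustrative):
  //   %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %v)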
};

} // end anonymous namespace

static Optional<unsigned> getAggregateSize(Instruction *InsertInst) {
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
    return cast<FixedVectorType>(IE->getType())->getNumElements();

  unsigned AggregateSize = 1;
  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  do {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      for (auto *Elt : ST->elements())
        if (Elt != ST->getElementType(0)) // check homogeneity
          return None;
      AggregateSize *= ST->getNumElements();
      CurrentType = ST->getElementType(0);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      AggregateSize *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
      AggregateSize *= VT->getNumElements();
      return AggregateSize;
    } else if (CurrentType->isSingleValueType()) {
      return AggregateSize;
    } else {
      return None;
    }
  } while (true);
}
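
// Illustrative examples for the walk above: [2 x {float, float}] and
// {<2 x float>, <2 x float>} both yield an aggregate size of 4, while a
// non-homogeneous struct such as {i32, float} is rejected with None.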

static bool findBuildAggregate_rec(Instruction *LastInsertInst,
                                   TargetTransformInfo *TTI,
                                   SmallVectorImpl<Value *> &BuildVectorOpds,
                                   SmallVectorImpl<Value *> &InsertElts,
                                   unsigned OperandOffset) {
  do {
    Value *InsertedOperand = LastInsertInst->getOperand(1);
    Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset);
    if (!OperandIndex)
      return false;
    if (isa<InsertElementInst>(InsertedOperand) ||
        isa<InsertValueInst>(InsertedOperand)) {
      if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
                                  BuildVectorOpds, InsertElts, *OperandIndex))
        return false;
    } else {
      BuildVectorOpds[*OperandIndex] = InsertedOperand;
      InsertElts[*OperandIndex] = LastInsertInst;
    }
    LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
  } while (LastInsertInst != nullptr &&
           (isa<InsertValueInst>(LastInsertInst) ||
            isa<InsertElementInst>(LastInsertInst)) &&
           LastInsertInst->hasOneUse());
  return true;
}

/// Recognize construction of vectors like
///  %ra = insertelement <4 x float> poison, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
/// starting from the last insertelement or insertvalue instruction.
///
/// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
/// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
/// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
///
/// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
///
/// \return true if it matches.
static bool findBuildAggregate(Instruction *LastInsertInst,
                               TargetTransformInfo *TTI,
                               SmallVectorImpl<Value *> &BuildVectorOpds,
                               SmallVectorImpl<Value *> &InsertElts) {

  assert((isa<InsertElementInst>(LastInsertInst) ||
          isa<InsertValueInst>(LastInsertInst)) &&
         "Expected insertelement or insertvalue instruction!");

  assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
         "Expected empty result vectors!");

  Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
  if (!AggregateSize)
    return false;
  BuildVectorOpds.resize(*AggregateSize);
  InsertElts.resize(*AggregateSize);

  if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts,
                             0)) {
    llvm::erase_value(BuildVectorOpds, nullptr);
    llvm::erase_value(InsertElts, nullptr);
    if (BuildVectorOpds.size() >= 2)
      return true;
  }

  return false;
}

/// Try and get a reduction value from a phi node.
///
/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
/// if they come from either \p ParentBB or a containing loop latch.
///
/// \returns A candidate reduction value if possible, or \code nullptr \endcode
/// if not possible.
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    return isa<Instruction>(R) &&
           DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from
  // that. This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}
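
// Illustrative sketch of the pattern handled above:
//   header:
//     %sum = phi i32 [ 0, %entry ], [ %sum.next, %latch ]
//   ...
//   latch:
//     %sum.next = add i32 %sum, %x
// When neither incoming block is the queried block itself, %sum.next coming
// from the loop latch is returned as the reduction candidate.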

static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
  if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
    return true;
  return false;
}

/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not found
/// and the root instruction is a binary operation, vectorization of the
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced or the
/// operands of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// or no vectorization of any binary operation feeding \a Root instruction was
/// performed.
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start analysis from the Root instruction. If a horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction or
  // vectorization is not possible or not effective, and the currently
  // analyzed instruction is a binary operation, try to vectorize the
  // operands, using pre-order DFS traversal order. If the operands were not
  // vectorized, repeat the same procedure considering each operand as a
  // possible root of the horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or
  // all sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
  // Skip the analysis of CmpInsts. The compiler implements a post-analysis of
  // CmpInsts, so we can skip extra attempts in
  // tryToVectorizeHorReductionOrInstOperands and save compile time.
  SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0});
  SmallPtrSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Instruction *Inst;
    unsigned Level;
    std::tie(Inst, Level) = Stack.pop_back_val();
    // Do not try to analyze an instruction that has already been vectorized.
    // This may happen when we vectorize instruction operands on a previous
    // iteration while the stack was populated before that happened.
    if (R.isDeleted(Inst))
      continue;
    Value *B0, *B1;
    bool IsBinop = matchRdxBop(Inst, B0, B1);
    bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
    if (IsBinop || IsSelect) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, Inst)) {
        if (HorRdx.tryToReduce(R, TTI)) {
          Res = true;
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      if (P && IsBinop) {
        Inst = dyn_cast<Instruction>(B0);
        if (Inst == P)
          Inst = dyn_cast<Instruction>(B1);
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
    }
    // Set P to nullptr to avoid re-analysis of phi node in
    // matchAssociativeReduction function unless this is the root node.
    P = nullptr;
    // Do not try to vectorize CmpInst operands, this is done separately.
    if (!isa<CmpInst>(Inst) && Vectorize(Inst, R)) {
      Res = true;
      continue;
    }

    // Try to vectorize operands.
    // Continue analysis for the instruction from the same basic block only to
    // save compile time.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        if (VisitedInstrs.insert(Op).second)
          if (auto *I = dyn_cast<Instruction>(Op))
            // Do not try to vectorize CmpInst operands, this is done
            // separately.
            if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) &&
                I->getParent() == BB)
              Stack.emplace_back(I, Level);
  }
  return Res;
}

bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  auto *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
  auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
    return tryToVectorize(I, R);
  };
  return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
                                                  ExtraVectorization);
}

bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
                                                 BasicBlock *BB, BoUpSLP &R) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  if (!R.canMapToVector(IVI->getType(), DL))
    return false;

  SmallVector<Value *, 16> BuildVectorOpds;
  SmallVector<Value *, 16> BuildVectorInsts;
  if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  // An aggregate value is unlikely to be processed in a vector register; we
  // need to extract the scalars into scalar registers instead.
  return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false);
}

bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  SmallVector<Value *, 16> BuildVectorInsts;
  SmallVector<Value *, 16> BuildVectorOpds;
  SmallVector<int> Mask;
  if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
      (llvm::all_of(BuildVectorOpds,
                    [](Value *V) { return isa<ExtractElementInst>(V); }) &&
       isShuffle(BuildVectorOpds, Mask)))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
  return tryToVectorizeList(BuildVectorInsts, R, /*AllowReorder=*/true);
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
    bool AtTerminator) {
  bool OpsChanged = false;
  SmallVector<Instruction *, 4> PostponedCmps;
  for (auto *I : reverse(Instructions)) {
    if (R.isDeleted(I))
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (isa<CmpInst>(I))
      PostponedCmps.push_back(I);
  }
  if (AtTerminator) {
    // Try to find reductions first.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      for (Value *Op : I->operands())
        OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
    }
    // Try to vectorize operands as vector bundles.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      OpsChanged |= tryToVectorize(I, R);
    }
    Instructions.clear();
  } else {
    // Insert in reverse order since the PostponedCmps vector was filled in
    // reverse order.
    Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
  }
  return OpsChanged;
}
8384
vectorizeChainsInBlock(BasicBlock * BB,BoUpSLP & R)8385 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
8386 bool Changed = false;
8387 SmallVector<Value *, 4> Incoming;
8388 SmallPtrSet<Value *, 16> VisitedInstrs;
8389 // Maps phi nodes to the non-phi nodes found in the use tree for each phi
8390 // node. Allows better to identify the chains that can be vectorized in the
8391 // better way.
8392 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
8393
8394 bool HaveVectorizedPhiNodes = true;
8395 while (HaveVectorizedPhiNodes) {
8396 HaveVectorizedPhiNodes = false;
8397
8398 // Collect the incoming values from the PHIs.
8399 Incoming.clear();
8400 for (Instruction &I : *BB) {
8401 PHINode *P = dyn_cast<PHINode>(&I);
8402 if (!P)
8403 break;
8404
8405 // No need to analyze deleted, vectorized and non-vectorizable
8406 // instructions.
8407 if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
8408 isValidElementType(P->getType()))
8409 Incoming.push_back(P);
8410 }
8411
8412 // Find the corresponding non-phi nodes for better matching when trying to
8413 // build the tree.
8414 for (Value *V : Incoming) {
8415 SmallVectorImpl<Value *> &Opcodes =
8416 PHIToOpcodes.try_emplace(V).first->getSecond();
8417 if (!Opcodes.empty())
8418 continue;
8419 SmallVector<Value *, 4> Nodes(1, V);
8420 SmallPtrSet<Value *, 4> Visited;
8421 while (!Nodes.empty()) {
8422 auto *PHI = cast<PHINode>(Nodes.pop_back_val());
8423 if (!Visited.insert(PHI).second)
8424 continue;
8425 for (Value *V : PHI->incoming_values()) {
8426 if (auto *PHI1 = dyn_cast<PHINode>((V))) {
8427 Nodes.push_back(PHI1);
8428 continue;
8429 }
8430 Opcodes.emplace_back(V);
8431 }
8432 }
8433 }
8434
8435 // Sort by type, parent, operands.
8436 stable_sort(Incoming, [this, &PHIToOpcodes](Value *V1, Value *V2) {
8437 assert(isValidElementType(V1->getType()) &&
8438 isValidElementType(V2->getType()) &&
8439 "Expected vectorizable types only.");
8440 // It is fine to compare type IDs here, since we expect only vectorizable
8441 // types, like ints, floats and pointers, we don't care about other type.
8442 if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
8443 return true;
8444 if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
8445 return false;
8446 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
8447 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
8448 if (Opcodes1.size() < Opcodes2.size())
8449 return true;
8450 if (Opcodes1.size() > Opcodes2.size())
8451 return false;
8452 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
8453 // Undefs are compatible with any other value.
8454 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
8455 continue;
8456 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
8457 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
8458 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
8459 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
8460 if (!NodeI1)
8461 return NodeI2 != nullptr;
8462 if (!NodeI2)
8463 return false;
8464 assert((NodeI1 == NodeI2) ==
8465 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
8466 "Different nodes should have different DFS numbers");
8467 if (NodeI1 != NodeI2)
8468 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
8469 InstructionsState S = getSameOpcode({I1, I2});
8470 if (S.getOpcode())
8471 continue;
8472 return I1->getOpcode() < I2->getOpcode();
8473 }
8474 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
8475 continue;
8476 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
8477 return true;
8478 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
8479 return false;
8480 }
8481 return false;
8482 });
8483
8484 auto &&AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) {
8485 if (V1 == V2)
8486 return true;
8487 if (V1->getType() != V2->getType())
8488 return false;
8489 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
8490 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
8491 if (Opcodes1.size() != Opcodes2.size())
8492 return false;
8493 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
8494 // Undefs are compatible with any other value.
8495 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
8496 continue;
8497 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
8498 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
8499 if (I1->getParent() != I2->getParent())
8500 return false;
8501 InstructionsState S = getSameOpcode({I1, I2});
8502 if (S.getOpcode())
8503 continue;
8504 return false;
8505 }
8506 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
8507 continue;
8508 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
8509 return false;
8510 }
8511 return true;
8512 };
8513
8514 // Try to vectorize elements base on their type.
8515 SmallVector<Value *, 4> Candidates;
8516 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
8517 E = Incoming.end();
8518 IncIt != E;) {
8519
8520 // Look for the next elements with the same type, parent and operand
8521 // kinds.
8522 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
8523 while (SameTypeIt != E && AreCompatiblePHIs(*SameTypeIt, *IncIt)) {
8524 VisitedInstrs.insert(*SameTypeIt);
8525 ++SameTypeIt;
8526 }
8527
8528 // Try to vectorize them.
8529 unsigned NumElts = (SameTypeIt - IncIt);
8530 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
8531 << NumElts << ")\n");
8532 // The order in which the phi nodes appear in the program does not matter.
8533 // So allow tryToVectorizeList to reorder them if it is beneficial. This
8534 // is done when there are exactly two elements since tryToVectorizeList
8535 // asserts that there are only two values when AllowReorder is true.
8536 if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
8537 /*AllowReorder=*/true)) {
8538 // Success start over because instructions might have been changed.
8539 HaveVectorizedPhiNodes = true;
8540 Changed = true;
8541 } else if (NumElts < 4 &&
8542 (Candidates.empty() ||
8543 Candidates.front()->getType() == (*IncIt)->getType())) {
8544 Candidates.append(IncIt, std::next(IncIt, NumElts));
8545 }
8546 // Final attempt to vectorize phis with the same types.
8547 if (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType()) {
8548 if (Candidates.size() > 1 &&
8549 tryToVectorizeList(Candidates, R, /*AllowReorder=*/true)) {
8550 // Success start over because instructions might have been changed.
8551 HaveVectorizedPhiNodes = true;
8552 Changed = true;
8553 }
8554 Candidates.clear();
8555 }
8556
8557 // Start over at the next instruction of a different type (or the end).
8558 IncIt = SameTypeIt;
8559 }
8560 }
8561
8562 VisitedInstrs.clear();
8563
8564 SmallVector<Instruction *, 8> PostProcessInstructions;
8565 SmallDenseSet<Instruction *, 4> KeyNodes;
8566 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
8567 // Skip instructions with scalable type. The num of elements is unknown at
8568 // compile-time for scalable type.
8569 if (isa<ScalableVectorType>(it->getType()))
8570 continue;
8571
8572 // Skip instructions marked for the deletion.
8573 if (R.isDeleted(&*it))
8574 continue;
8575 // We may go through BB multiple times so skip the one we have checked.
8576 if (!VisitedInstrs.insert(&*it).second) {
8577 if (it->use_empty() && KeyNodes.contains(&*it) &&
8578 vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
8579 it->isTerminator())) {
8580 // We would like to start over since some instructions are deleted
8581 // and the iterator may become invalid value.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() == 2) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                     TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
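      // Illustrative sketch (hypothetical IR): a typical candidate is an
      // accumulating phi such as
      //   %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
      //   %sum.next = add i32 %sum, %x
      // where the value feeding back into the phi is taken as the potential
      // root of a horizontal reduction.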
      // Try to vectorize the incoming values of the PHI, to catch reductions
      // that feed into PHIs.
      for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
        // Skip if the incoming block is the current BB for now. Also, bypass
        // unreachable IR for efficiency and to avoid crashing.
        // TODO: Collect the skipped incoming values and try to vectorize them
        // after processing BB.
        if (BB == P->getIncomingBlock(I) ||
            !DT->isReachableFromEntry(P->getIncomingBlock(I)))
          continue;

        Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
                                            P->getIncomingBlock(I), R, TTI);
      }
      continue;
    }

    // We ran into an instruction without users, such as a terminator, a
    // store, or a function call whose return value is ignored. Handle only
    // instructions of void type here, plus CallInst and InvokeInst,
    // regardless of their type.
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Try to vectorize the postponed (post-process) instructions, starting
      // from the top-of-tree instructions, to vectorize as many of them as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                                it->isTerminator());
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }
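    // Illustrative sketch (hypothetical IR): a store such as
    //   store i32 %sum.next, i32* %dst
    // has no users itself, but its value operand may root a horizontal
    // reduction, which is why such key nodes seed the search above.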

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // Process the GEP list in chunks suitable for the target's supported
    // vector size. If a vector register can't hold 1 element, we are done. We
    // are trying to vectorize the index computations, so the maximum number of
    // elements is based on the size of the index expression, rather than the
    // size of the GEP itself (the target's pointer size).
    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
    if (MaxVecRegSize < EltSize)
      continue;

    unsigned MaxElts = MaxVecRegSize / EltSize;
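    // For example, assuming a 128-bit vector register and i64 index
    // expressions, EltSize is 64 and MaxElts = 128 / 64 = 2, so the list is
    // processed in chunks of at most two getelementptrs.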
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
      auto Len = std::min<unsigned>(BE - BI, MaxElts);
      ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so remove
      // them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1;
           ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }
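      // Illustrative sketch (hypothetical IR): for the candidates
      //   %g1 = getelementptr inbounds i32, i32* %p, i64 1
      //   %g2 = getelementptr inbounds i32, i32* %p, i64 2
      // the SCEV difference is the constant 4 (bytes), so both are pruned:
      // one address is trivially computable from the other, and vectorizing
      // the index computation would not pay for itself.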

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer and value operand. Value operands must be
  // compatible (have the same opcode, same parent), otherwise it is
  // definitely not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2});
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };
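  // Illustrative sketch (hypothetical IR): if %add0 is defined in a block
  // that dominates the block defining %add1, then
  //   store i32 %add0, i32* %p0
  // sorts before
  //   store i32 %add1, i32* %p1
  // because a dominator's DFS-in number is smaller than that of the nodes it
  // dominates, keeping potentially compatible stores adjacent.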

  auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2});
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };
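  // Illustrative sketch (hypothetical IR): two stores
  //   store i32 %add0, i32* %p0
  //   store i32 %add1, i32* %p1
  // are compatible when %add0 and %add1 are instructions with the same opcode
  // in the same block; a store of an 'add' and a store of a 'load' are not,
  // while undef value operands match anything.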

  // Attempt to sort and vectorize each of the store-groups.
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    stable_sort(Pair.second, StoreSorter);

    // Try to vectorize elements based on their compatibility.
    for (ArrayRef<StoreInst *>::iterator IncIt = Pair.second.begin(),
                                         E = Pair.second.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      ArrayRef<StoreInst *>::iterator SameTypeIt = IncIt;
      Type *EltTy = (*IncIt)->getPointerOperand()->getType();

      while (SameTypeIt != E && AreCompatibleStores(*SameTypeIt, *IncIt))
        ++SameTypeIt;

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at stores ("
                        << NumElts << ")\n");
      if (NumElts > 1 && !EltTy->getPointerElementType()->isVectorTy() &&
          vectorizeStores(makeArrayRef(IncIt, NumElts), R)) {
        // Success. Start over because instructions might have been changed.
        Changed = true;
      }
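      // Illustrative sketch (hypothetical IR): a compatible run such as
      //   store i32 %v0, i32* %p0
      //   store i32 %v1, i32* %p1   ; %p1 is %p0 plus one element
      // may be rewritten by vectorizeStores() into a single two-element
      // vector store when the addresses are consecutive and the cost model
      // is satisfied.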

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }