1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to evaluate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
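// For example (an illustrative sketch, not generated output), a scalar loop
//   for (i = 0; i < n; ++i) A[i] = B[i] + 1;
// is conceptually rewritten for a vectorization factor of 4 as
//   for (i = 0; i + 3 < n; i += 4) A[i..i+3] = B[i..i+3] + <1,1,1,1>;
// with the remaining iterations handled by a scalar epilogue loop or by
// predicating (masking) the tail.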
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 // of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 // widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 // of vectorization. It decides on the optimal vector width, which
26 // can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 // D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 // Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 // Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46 // Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 // A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 // S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52 // Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/MathExtras.h"
134 #include "llvm/Support/raw_ostream.h"
135 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
136 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
137 #include "llvm/Transforms/Utils/LoopSimplify.h"
138 #include "llvm/Transforms/Utils/LoopUtils.h"
139 #include "llvm/Transforms/Utils/LoopVersioning.h"
140 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <cstdlib>
147 #include <functional>
148 #include <iterator>
149 #include <limits>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154
155 using namespace llvm;
156
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159
160 /// @{
161 /// Metadata attribute names
162 static const char *const LLVMLoopVectorizeFollowupAll =
163 "llvm.loop.vectorize.followup_all";
164 static const char *const LLVMLoopVectorizeFollowupVectorized =
165 "llvm.loop.vectorize.followup_vectorized";
166 static const char *const LLVMLoopVectorizeFollowupEpilogue =
167 "llvm.loop.vectorize.followup_epilogue";
168 /// @}
169
170 STATISTIC(LoopsVectorized, "Number of loops vectorized");
171 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
172
173 /// Loops with a known constant trip count below this number are vectorized only
174 /// if no scalar iteration overheads are incurred.
175 static cl::opt<unsigned> TinyTripCountVectorThreshold(
176 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
177 cl::desc("Loops with a constant trip count that is smaller than this "
178 "value are vectorized only if no scalar iteration overheads "
179 "are incurred."));
180
181 // Indicates that an epilogue is undesired and that predication is preferred.
182 // This means that the vectorizer will try to fold the loop-tail (epilogue)
183 // into the loop and predicate the loop body accordingly.
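// For example (illustrative), with VF = 4 and a trip count of 10, folding the
// tail keeps the final two iterations inside the masked vector loop instead of
// emitting a separate scalar remainder loop.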
184 static cl::opt<bool> PreferPredicateOverEpilog(
185 "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
186 cl::desc("Indicate that an epilogue is undesired, predication should be "
187 "used instead."));
188
189 static cl::opt<bool> MaximizeBandwidth(
190 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
191 cl::desc("Maximize bandwidth when selecting vectorization factor which "
192 "will be determined by the smallest type in loop."));
193
194 static cl::opt<bool> EnableInterleavedMemAccesses(
195 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
196 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
197
198 /// An interleave-group may need masking if it resides in a block that needs
199 /// predication, or in order to mask away gaps.
200 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
201 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
202 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
203
204 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
205 "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
206 cl::desc("We don't interleave loops with a estimated constant trip count "
207 "below this number"));
208
209 static cl::opt<unsigned> ForceTargetNumScalarRegs(
210 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
211 cl::desc("A flag that overrides the target's number of scalar registers."));
212
213 static cl::opt<unsigned> ForceTargetNumVectorRegs(
214 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
215 cl::desc("A flag that overrides the target's number of vector registers."));
216
217 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
218 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
219 cl::desc("A flag that overrides the target's max interleave factor for "
220 "scalar loops."));
221
222 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
223 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
224 cl::desc("A flag that overrides the target's max interleave factor for "
225 "vectorized loops."));
226
227 static cl::opt<unsigned> ForceTargetInstructionCost(
228 "force-target-instruction-cost", cl::init(0), cl::Hidden,
229 cl::desc("A flag that overrides the target's expected cost for "
230 "an instruction to a single constant value. Mostly "
231 "useful for getting consistent testing."));
232
233 static cl::opt<unsigned> SmallLoopCost(
234 "small-loop-cost", cl::init(20), cl::Hidden,
235 cl::desc(
236 "The cost of a loop that is considered 'small' by the interleaver."));
237
238 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
239 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
240 cl::desc("Enable the use of the block frequency analysis to access PGO "
241 "heuristics minimizing code growth in cold regions and being more "
242 "aggressive in hot regions."));
243
244 // Runtime interleave loops for load/store throughput.
245 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
246 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
247 cl::desc(
248 "Enable runtime interleaving until load/store ports are saturated"));
249
250 /// The number of stores in a loop that are allowed to need predication.
251 static cl::opt<unsigned> NumberOfStoresToPredicate(
252 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
253 cl::desc("Max number of stores to be predicated behind an if."));
254
255 static cl::opt<bool> EnableIndVarRegisterHeur(
256 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
257 cl::desc("Count the induction variable only once when interleaving"));
258
259 static cl::opt<bool> EnableCondStoresVectorization(
260 "enable-cond-stores-vec", cl::init(true), cl::Hidden,
261 cl::desc("Enable if predication of stores during vectorization."));
262
263 static cl::opt<unsigned> MaxNestedScalarReductionIC(
264 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
265 cl::desc("The maximum interleave count to use when interleaving a scalar "
266 "reduction in a nested loop."));
267
268 cl::opt<bool> EnableVPlanNativePath(
269 "enable-vplan-native-path", cl::init(false), cl::Hidden,
270 cl::desc("Enable VPlan-native vectorization path with "
271 "support for outer loop vectorization."));
272
273 // FIXME: Remove this switch once we have divergence analysis. Currently we
274 // assume divergent non-backedge branches when this switch is true.
275 cl::opt<bool> EnableVPlanPredication(
276 "enable-vplan-predication", cl::init(false), cl::Hidden,
277 cl::desc("Enable VPlan-native vectorization path predicator with "
278 "support for outer loop vectorization."));
279
280 // This flag enables the stress testing of the VPlan H-CFG construction in the
281 // VPlan-native vectorization path. It must be used in conjunction with
282 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
283 // verification of the H-CFGs built.
284 static cl::opt<bool> VPlanBuildStressTest(
285 "vplan-build-stress-test", cl::init(false), cl::Hidden,
286 cl::desc(
287 "Build VPlan for every supported loop nest in the function and bail "
288 "out right after the build (stress test the VPlan H-CFG construction "
289 "in the VPlan-native vectorization path)."));
290
291 cl::opt<bool> llvm::EnableLoopInterleaving(
292 "interleave-loops", cl::init(true), cl::Hidden,
293 cl::desc("Enable loop interleaving in Loop vectorization passes"));
294 cl::opt<bool> llvm::EnableLoopVectorization(
295 "vectorize-loops", cl::init(true), cl::Hidden,
296 cl::desc("Run the Loop vectorization passes"));
297
298 /// A helper function that returns the type of loaded or stored value.
299 static Type *getMemInstValueType(Value *I) {
300 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
301 "Expected Load or Store instruction");
302 if (auto *LI = dyn_cast<LoadInst>(I))
303 return LI->getType();
304 return cast<StoreInst>(I)->getValueOperand()->getType();
305 }
306
307 /// A helper function that returns true if the given type is irregular. The
308 /// type is irregular if its allocated size doesn't equal the store size of an
309 /// element of the corresponding vector type at the given vectorization factor.
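/// For example (assuming a typical data layout), an array of 4 i1 values
/// occupies 4 bytes while a <4 x i1> vector has a store size of 1 byte, so i1
/// is irregular at VF = 4 and such accesses cannot simply be bitcast to a
/// vector access.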
310 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
311 // Determine if an array of VF elements of type Ty is "bitcast compatible"
312 // with a <VF x Ty> vector.
313 if (VF > 1) {
314 auto *VectorTy = FixedVectorType::get(Ty, VF);
315 return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
316 }
317
318 // If the vectorization factor is one, we just check if an array of type Ty
319 // requires padding between elements.
320 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
321 }
322
323 /// A helper function that returns the reciprocal of the block probability of
324 /// predicated blocks. If we return X, we are assuming the predicated block
325 /// will execute once for every X iterations of the loop header.
326 ///
327 /// TODO: We should use actual block probability here, if available. Currently,
328 /// we always assume predicated blocks have a 50% chance of executing.
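/// For example, returning 2 here means the cost model treats a predicated
/// block as executing on every other iteration, halving its contribution to
/// the loop cost.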
329 static unsigned getReciprocalPredBlockProb() { return 2; }
330
331 /// A helper function that adds a 'fast' flag to floating-point operations.
332 static Value *addFastMathFlag(Value *V) {
333 if (isa<FPMathOperator>(V))
334 cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
335 return V;
336 }
337
338 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
339 if (isa<FPMathOperator>(V))
340 cast<Instruction>(V)->setFastMathFlags(FMF);
341 return V;
342 }
343
344 /// A helper function that returns an integer or floating-point constant with
345 /// value C.
346 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
347 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
348 : ConstantFP::get(Ty, C);
349 }
350
351 /// Returns "best known" trip count for the specified loop \p L as defined by
352 /// the following procedure:
353 /// 1) Returns exact trip count if it is known.
354 /// 2) Returns expected trip count according to profile data if any.
355 /// 3) Returns upper bound estimate if it is known.
356 /// 4) Returns None if all of the above failed.
357 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
358 // Check if exact trip count is known.
359 if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
360 return ExpectedTC;
361
362 // Check if there is an expected trip count available from profile data.
363 if (LoopVectorizeWithBlockFrequency)
364 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
365 return EstimatedTC;
366
367 // Check if upper bound estimate is known.
368 if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
369 return ExpectedTC;
370
371 return None;
372 }
373
374 namespace llvm {
375
376 /// InnerLoopVectorizer vectorizes loops which contain only one basic
377 /// block to a specified vectorization factor (VF).
378 /// This class performs the widening of scalars into vectors, or multiple
379 /// scalars. This class also implements the following features:
380 /// * It inserts an epilogue loop for handling loops that don't have iteration
381 /// counts that are known to be a multiple of the vectorization factor.
382 /// * It handles the code generation for reduction variables.
383 /// * Scalarization (implementation using scalars) of un-vectorizable
384 /// instructions.
385 /// InnerLoopVectorizer does not perform any vectorization-legality
386 /// checks, and relies on the caller to check for the different legality
387 /// aspects. The InnerLoopVectorizer relies on the
388 /// LoopVectorizationLegality class to provide information about the induction
389 /// and reduction variables that were found for a given vectorization factor.
390 class InnerLoopVectorizer {
391 public:
392 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
393 LoopInfo *LI, DominatorTree *DT,
394 const TargetLibraryInfo *TLI,
395 const TargetTransformInfo *TTI, AssumptionCache *AC,
396 OptimizationRemarkEmitter *ORE, unsigned VecWidth,
397 unsigned UnrollFactor, LoopVectorizationLegality *LVL,
398 LoopVectorizationCostModel *CM)
399 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
400 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
401 Builder(PSE.getSE()->getContext()),
402 VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
403 virtual ~InnerLoopVectorizer() = default;
404
405 /// Create a new empty loop. Unlink the old loop and connect the new one.
406 /// Return the pre-header block of the new loop.
407 BasicBlock *createVectorizedLoopSkeleton();
408
409 /// Widen a single instruction within the innermost loop.
410 void widenInstruction(Instruction &I, VPUser &Operands,
411 VPTransformState &State);
412
413 /// Widen a single call instruction within the innermost loop.
414 void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
415 VPTransformState &State);
416
417 /// Widen a single select instruction within the innermost loop.
418 void widenSelectInstruction(SelectInst &I, VPUser &Operands,
419 bool InvariantCond, VPTransformState &State);
420
421 /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
422 void fixVectorizedLoop();
423
424 // Return true if any runtime check is added.
425 bool areSafetyChecksAdded() { return AddedSafetyChecks; }
426
427 /// A type for vectorized values in the new loop. Each value from the
428 /// original loop, when vectorized, is represented by UF vector values in the
429 /// new unrolled loop, where UF is the unroll factor.
430 using VectorParts = SmallVector<Value *, 2>;
431
432 /// Vectorize a single GetElementPtrInst based on information gathered and
433 /// decisions taken during planning.
434 void widenGEP(GetElementPtrInst *GEP, VPUser &Indices, unsigned UF,
435 unsigned VF, bool IsPtrLoopInvariant,
436 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
437
438 /// Vectorize a single PHINode in a block. This method handles the induction
439 /// variable canonicalization. It supports both VF = 1 for unrolled loops and
440 /// arbitrary length vectors.
441 void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
442
443 /// A helper function to scalarize a single Instruction in the innermost loop.
444 /// Generates a sequence of scalar instances for each lane between \p MinLane
445 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
446 /// inclusive. Uses the VPValue operands from \p Operands instead of \p
447 /// Instr's operands.
448 void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
449 const VPIteration &Instance, bool IfPredicateInstr,
450 VPTransformState &State);
451
452 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
453 /// is provided, the integer induction variable will first be truncated to
454 /// the corresponding type.
455 void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
456
457 /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
458 /// vector or scalar value on-demand if one is not yet available. When
459 /// vectorizing a loop, we visit the definition of an instruction before its
460 /// uses. When visiting the definition, we either vectorize or scalarize the
461 /// instruction, creating an entry for it in the corresponding map. (In some
462 /// cases, such as induction variables, we will create both vector and scalar
463 /// entries.) Then, as we encounter uses of the definition, we derive values
464 /// for each scalar or vector use unless such a value is already available.
465 /// For example, if we scalarize a definition and one of its uses is vector,
466 /// we build the required vector on-demand with an insertelement sequence
467 /// when visiting the use. Otherwise, if the use is scalar, we can use the
468 /// existing scalar definition.
469 ///
470 /// Return a value in the new loop corresponding to \p V from the original
471 /// loop at unroll index \p Part. If the value has already been vectorized,
472 /// the corresponding vector entry in VectorLoopValueMap is returned. If,
473 /// however, the value has a scalar entry in VectorLoopValueMap, we construct
474 /// a new vector value on-demand by inserting the scalar values into a vector
475 /// with an insertelement sequence. If the value has been neither vectorized
476 /// nor scalarized, it must be loop invariant, so we simply broadcast the
477 /// value into a vector.
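///
/// For example (an illustrative scenario), if a definition was scalarized with
/// UF = 2 and VF = 4 and a use later needs the vector form of unroll part 1,
/// the four scalar lanes of part 1 are packed together with an insertelement
/// sequence and the resulting vector is cached in VectorLoopValueMap for
/// subsequent uses.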
478 Value *getOrCreateVectorValue(Value *V, unsigned Part);
479
480 /// Return a value in the new loop corresponding to \p V from the original
481 /// loop at unroll and vector indices \p Instance. If the value has been
482 /// vectorized but not scalarized, the necessary extractelement instruction
483 /// will be generated.
484 Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
485
486 /// Construct the vector value of a scalarized value \p V one lane at a time.
487 void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
488
489 /// Try to vectorize interleaved access group \p Group with the base address
490 /// given in \p Addr, optionally masking the vector operations if \p
491 /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
492 /// values in the vectorized loop.
493 void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
494 VPTransformState &State, VPValue *Addr,
495 VPValue *BlockInMask = nullptr);
496
497 /// Vectorize Load and Store instructions with the base address given in \p
498 /// Addr, optionally masking the vector operations if \p BlockInMask is
499 /// non-null. Use \p State to translate given VPValues to IR values in the
500 /// vectorized loop.
501 void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
502 VPValue *Addr, VPValue *StoredValue,
503 VPValue *BlockInMask);
504
505 /// Set the debug location in the builder using the debug location in
506 /// the instruction.
507 void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
508
509 /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
510 void fixNonInductionPHIs(void);
511
512 protected:
513 friend class LoopVectorizationPlanner;
514
515 /// A small list of PHINodes.
516 using PhiVector = SmallVector<PHINode *, 4>;
517
518 /// A type for scalarized values in the new loop. Each value from the
519 /// original loop, when scalarized, is represented by UF x VF scalar values
520 /// in the new unrolled loop, where UF is the unroll factor and VF is the
521 /// vectorization factor.
522 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
523
524 /// Set up the values of the IVs correctly when exiting the vector loop.
525 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
526 Value *CountRoundDown, Value *EndValue,
527 BasicBlock *MiddleBlock);
528
529 /// Create a new induction variable inside L.
530 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
531 Value *Step, Instruction *DL);
532
533 /// Handle all cross-iteration phis in the header.
534 void fixCrossIterationPHIs();
535
536 /// Fix a first-order recurrence. This is the second phase of vectorizing
537 /// this phi node.
538 void fixFirstOrderRecurrence(PHINode *Phi);
539
540 /// Fix a reduction cross-iteration phi. This is the second phase of
541 /// vectorizing this phi node.
542 void fixReduction(PHINode *Phi);
543
544 /// Clear NSW/NUW flags from reduction instructions if necessary.
545 void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
546
547 /// The Loop exit block may have single value PHI nodes with some
548 /// incoming value. While vectorizing we only handled real values
549 /// that were defined inside the loop and we should have one value for
550 /// each predecessor of its parent basic block. See PR14725.
551 void fixLCSSAPHIs();
552
553 /// Iteratively sink the scalarized operands of a predicated instruction into
554 /// the block that was created for it.
555 void sinkScalarOperands(Instruction *PredInst);
556
557 /// Shrinks vector element sizes to the smallest bitwidth they can be legally
558 /// represented as.
559 void truncateToMinimalBitwidths();
560
561 /// Create a broadcast instruction. This method generates a broadcast
562 /// instruction (shuffle) for loop invariant values and for the induction
563 /// value. If this is the induction variable then we extend it to N, N+1, ...
564 /// this is needed because each iteration in the loop corresponds to a SIMD
565 /// element.
566 virtual Value *getBroadcastInstrs(Value *V);
567
568 /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
569 /// to each vector element of Val. The sequence starts at StartIdx.
570 /// \p Opcode is relevant for FP induction variables.
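/// For example (illustrative), with VF = 4, StartIdx = 0, Step = 1 and an
/// integer induction, a broadcast of %x becomes <%x, %x+1, %x+2, %x+3>.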
571 virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
572 Instruction::BinaryOps Opcode =
573 Instruction::BinaryOpsEnd);
574
575 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
576 /// variable on which to base the steps, \p Step is the size of the step, and
577 /// \p EntryVal is the value from the original loop that maps to the steps.
578 /// Note that \p EntryVal doesn't have to be an induction variable - it
579 /// can also be a truncate instruction.
580 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
581 const InductionDescriptor &ID);
582
583 /// Create a vector induction phi node based on an existing scalar one. \p
584 /// EntryVal is the value from the original loop that maps to the vector phi
585 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
586 /// truncate instruction, instead of widening the original IV, we widen a
587 /// version of the IV truncated to \p EntryVal's type.
588 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
589 Value *Step, Instruction *EntryVal);
590
591 /// Returns true if an instruction \p I should be scalarized instead of
592 /// vectorized for the chosen vectorization factor.
593 bool shouldScalarizeInstruction(Instruction *I) const;
594
595 /// Returns true if we should generate a scalar version of \p IV.
596 bool needsScalarInduction(Instruction *IV) const;
597
598 /// If there is a cast involved in the induction variable \p ID, which should
599 /// be ignored in the vectorized loop body, this function records the
600 /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
601 /// cast. We had already proved that the casted Phi is equal to the uncasted
602 /// Phi in the vectorized loop (under a runtime guard), and therefore
603 /// there is no need to vectorize the cast - the same value can be used in the
604 /// vector loop for both the Phi and the cast.
605 /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
606 /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
607 ///
608 /// \p EntryVal is the value from the original loop that maps to the vector
609 /// phi node and is used to distinguish what is the IV currently being
610 /// processed - original one (if \p EntryVal is a phi corresponding to the
611 /// original IV) or the "newly-created" one based on the proof mentioned above
612 /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
613 /// latter case \p EntryVal is a TruncInst and we must not record anything for
614 /// that IV, but it's error-prone to expect callers of this routine to care
615 /// about that, hence this explicit parameter.
616 void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
617 const Instruction *EntryVal,
618 Value *VectorLoopValue,
619 unsigned Part,
620 unsigned Lane = UINT_MAX);
621
622 /// Generate a shuffle sequence that will reverse the vector Vec.
623 virtual Value *reverseVector(Value *Vec);
624
625 /// Returns (and creates if needed) the original loop trip count.
626 Value *getOrCreateTripCount(Loop *NewLoop);
627
628 /// Returns (and creates if needed) the trip count of the widened loop.
629 Value *getOrCreateVectorTripCount(Loop *NewLoop);
630
631 /// Returns a bitcasted value to the requested vector type.
632 /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
633 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
634 const DataLayout &DL);
635
636 /// Emit a bypass check to see if the vector trip count is zero, including if
637 /// it overflows.
638 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
639
640 /// Emit a bypass check to see if all of the SCEV assumptions we've
641 /// had to make are correct.
642 void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
643
644 /// Emit bypass checks to check any memory assumptions we may have made.
645 void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
646
647 /// Compute the transformed value of Index at offset StartValue using step
648 /// StepValue.
649 /// For integer induction, returns StartValue + Index * StepValue.
650 /// For pointer induction, returns StartValue[Index * StepValue].
651 /// FIXME: The newly created binary instructions should contain nsw/nuw
652 /// flags, which can be found from the original scalar operations.
653 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
654 const DataLayout &DL,
655 const InductionDescriptor &ID) const;
656
657 /// Add additional metadata to \p To that was not present on \p Orig.
658 ///
659 /// Currently this is used to add the noalias annotations based on the
660 /// inserted memchecks. Use this for instructions that are *cloned* into the
661 /// vector loop.
662 void addNewMetadata(Instruction *To, const Instruction *Orig);
663
664 /// Add metadata from one instruction to another.
665 ///
666 /// This includes both the original MDs from \p From and additional ones (\see
667 /// addNewMetadata). Use this for *newly created* instructions in the vector
668 /// loop.
669 void addMetadata(Instruction *To, Instruction *From);
670
671 /// Similar to the previous function but it adds the metadata to a
672 /// vector of instructions.
673 void addMetadata(ArrayRef<Value *> To, Instruction *From);
674
675 /// The original loop.
676 Loop *OrigLoop;
677
678 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
679 /// dynamic knowledge to simplify SCEV expressions and converts them to a
680 /// more usable form.
681 PredicatedScalarEvolution &PSE;
682
683 /// Loop Info.
684 LoopInfo *LI;
685
686 /// Dominator Tree.
687 DominatorTree *DT;
688
689 /// Alias Analysis.
690 AAResults *AA;
691
692 /// Target Library Info.
693 const TargetLibraryInfo *TLI;
694
695 /// Target Transform Info.
696 const TargetTransformInfo *TTI;
697
698 /// Assumption Cache.
699 AssumptionCache *AC;
700
701 /// Interface to emit optimization remarks.
702 OptimizationRemarkEmitter *ORE;
703
704 /// LoopVersioning. It's only set up (non-null) if memchecks were
705 /// used.
706 ///
707 /// This is currently only used to add no-alias metadata based on the
708 /// memchecks. The actual versioning is performed manually.
709 std::unique_ptr<LoopVersioning> LVer;
710
711 /// The vectorization SIMD factor to use. Each vector will have this many
712 /// vector elements.
713 unsigned VF;
714
715 /// The vectorization unroll factor to use. Each scalar is vectorized to this
716 /// many different vector instructions.
717 unsigned UF;
718
719 /// The builder that we use
720 IRBuilder<> Builder;
721
722 // --- Vectorization state ---
723
724 /// The vector-loop preheader.
725 BasicBlock *LoopVectorPreHeader;
726
727 /// The scalar-loop preheader.
728 BasicBlock *LoopScalarPreHeader;
729
730 /// Middle Block between the vector and the scalar.
731 BasicBlock *LoopMiddleBlock;
732
733 /// The ExitBlock of the scalar loop.
734 BasicBlock *LoopExitBlock;
735
736 /// The vector loop body.
737 BasicBlock *LoopVectorBody;
738
739 /// The scalar loop body.
740 BasicBlock *LoopScalarBody;
741
742 /// A list of all bypass blocks. The first block is the entry of the loop.
743 SmallVector<BasicBlock *, 4> LoopBypassBlocks;
744
745 /// The new Induction variable which was added to the new block.
746 PHINode *Induction = nullptr;
747
748 /// The induction variable of the old basic block.
749 PHINode *OldInduction = nullptr;
750
751 /// Maps values from the original loop to their corresponding values in the
752 /// vectorized loop. A key value can map to either vector values, scalar
753 /// values or both kinds of values, depending on whether the key was
754 /// vectorized and scalarized.
755 VectorizerValueMap VectorLoopValueMap;
756
757 /// Store instructions that were predicated.
758 SmallVector<Instruction *, 4> PredicatedInstructions;
759
760 /// Trip count of the original loop.
761 Value *TripCount = nullptr;
762
763 /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
764 Value *VectorTripCount = nullptr;
765
766 /// The legality analysis.
767 LoopVectorizationLegality *Legal;
768
769 /// The profitability analysis.
770 LoopVectorizationCostModel *Cost;
771
772 // Record whether runtime checks are added.
773 bool AddedSafetyChecks = false;
774
775 // Holds the end values for each induction variable. We save the end values
776 // so we can later fix-up the external users of the induction variables.
777 DenseMap<PHINode *, Value *> IVEndValues;
778
779 // Vector of original scalar PHIs whose corresponding widened PHIs need to be
780 // fixed up at the end of vector code generation.
781 SmallVector<PHINode *, 8> OrigPHIsToFix;
782 };
783
784 class InnerLoopUnroller : public InnerLoopVectorizer {
785 public:
786 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
787 LoopInfo *LI, DominatorTree *DT,
788 const TargetLibraryInfo *TLI,
789 const TargetTransformInfo *TTI, AssumptionCache *AC,
790 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
791 LoopVectorizationLegality *LVL,
792 LoopVectorizationCostModel *CM)
793 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
794 UnrollFactor, LVL, CM) {}
795
796 private:
797 Value *getBroadcastInstrs(Value *V) override;
798 Value *getStepVector(Value *Val, int StartIdx, Value *Step,
799 Instruction::BinaryOps Opcode =
800 Instruction::BinaryOpsEnd) override;
801 Value *reverseVector(Value *Vec) override;
802 };
803
804 } // end namespace llvm
805
806 /// Look for a meaningful debug location on the instruction or its
807 /// operands.
808 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
809 if (!I)
810 return I;
811
812 DebugLoc Empty;
813 if (I->getDebugLoc() != Empty)
814 return I;
815
816 for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
817 if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
818 if (OpInst->getDebugLoc() != Empty)
819 return OpInst;
820 }
821
822 return I;
823 }
824
825 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
826 if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
827 const DILocation *DIL = Inst->getDebugLoc();
828 if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
829 !isa<DbgInfoIntrinsic>(Inst)) {
830 auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
831 if (NewDIL)
832 B.SetCurrentDebugLocation(NewDIL.getValue());
833 else
834 LLVM_DEBUG(dbgs()
835 << "Failed to create new discriminator: "
836 << DIL->getFilename() << " Line: " << DIL->getLine());
837 }
838 else
839 B.SetCurrentDebugLocation(DIL);
840 } else
841 B.SetCurrentDebugLocation(DebugLoc());
842 }
843
844 /// Write a record \p DebugMsg about vectorization failure to the debug
845 /// output stream. If \p I is passed, it is an instruction that prevents
846 /// vectorization.
847 #ifndef NDEBUG
848 static void debugVectorizationFailure(const StringRef DebugMsg,
849 Instruction *I) {
850 dbgs() << "LV: Not vectorizing: " << DebugMsg;
851 if (I != nullptr)
852 dbgs() << " " << *I;
853 else
854 dbgs() << '.';
855 dbgs() << '\n';
856 }
857 #endif
858
859 /// Create an analysis remark that explains why vectorization failed
860 ///
861 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
862 /// RemarkName is the identifier for the remark. If \p I is passed it is an
863 /// instruction that prevents vectorization. Otherwise \p TheLoop is used for
864 /// the location of the remark. \return the remark object that can be
865 /// streamed to.
866 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
867 StringRef RemarkName, Loop *TheLoop, Instruction *I) {
868 Value *CodeRegion = TheLoop->getHeader();
869 DebugLoc DL = TheLoop->getStartLoc();
870
871 if (I) {
872 CodeRegion = I->getParent();
873 // If there is no debug location attached to the instruction, fall back to
874 // using the loop's.
875 if (I->getDebugLoc())
876 DL = I->getDebugLoc();
877 }
878
879 OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
880 R << "loop not vectorized: ";
881 return R;
882 }
883
884 namespace llvm {
885
886 void reportVectorizationFailure(const StringRef DebugMsg,
887 const StringRef OREMsg, const StringRef ORETag,
888 OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
889 LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
890 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
891 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
892 ORETag, TheLoop, I) << OREMsg);
893 }
894
895 } // end namespace llvm
896
897 #ifndef NDEBUG
898 /// \return string containing a file name and a line # for the given loop.
899 static std::string getDebugLocString(const Loop *L) {
900 std::string Result;
901 if (L) {
902 raw_string_ostream OS(Result);
903 if (const DebugLoc LoopDbgLoc = L->getStartLoc())
904 LoopDbgLoc.print(OS);
905 else
906 // Just print the module name.
907 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
908 OS.flush();
909 }
910 return Result;
911 }
912 #endif
913
914 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
915 const Instruction *Orig) {
916 // If the loop was versioned with memchecks, add the corresponding no-alias
917 // metadata.
918 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
919 LVer->annotateInstWithNoAlias(To, Orig);
920 }
921
922 void InnerLoopVectorizer::addMetadata(Instruction *To,
923 Instruction *From) {
924 propagateMetadata(To, From);
925 addNewMetadata(To, From);
926 }
927
928 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
929 Instruction *From) {
930 for (Value *V : To) {
931 if (Instruction *I = dyn_cast<Instruction>(V))
932 addMetadata(I, From);
933 }
934 }
935
936 namespace llvm {
937
938 // Loop vectorization cost-model hints for how the scalar epilogue loop should
939 // be lowered.
940 enum ScalarEpilogueLowering {
941
942 // The default: allowing scalar epilogues.
943 CM_ScalarEpilogueAllowed,
944
945 // Vectorization with OptForSize: don't allow epilogues.
946 CM_ScalarEpilogueNotAllowedOptSize,
947
948 // A special case of vectorization with OptForSize: loops with a very small
949 // trip count are considered for vectorization under OptForSize, thereby
950 // making sure the cost of their loop body is dominant, free of runtime
951 // guards and scalar iteration overheads.
952 CM_ScalarEpilogueNotAllowedLowTripLoop,
953
954 // Loop hint predicate indicating an epilogue is undesired.
955 CM_ScalarEpilogueNotNeededUsePredicate
956 };
957
958 /// LoopVectorizationCostModel - estimates the expected speedups due to
959 /// vectorization.
960 /// In many cases vectorization is not profitable. This can happen for a
961 /// number of reasons. In this class we mainly attempt to predict the
962 /// expected speedup/slowdowns due to the supported instruction set. We use the
963 /// TargetTransformInfo to query the different backends for the cost of
964 /// different operations.
965 class LoopVectorizationCostModel {
966 public:
967 LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
968 PredicatedScalarEvolution &PSE, LoopInfo *LI,
969 LoopVectorizationLegality *Legal,
970 const TargetTransformInfo &TTI,
971 const TargetLibraryInfo *TLI, DemandedBits *DB,
972 AssumptionCache *AC,
973 OptimizationRemarkEmitter *ORE, const Function *F,
974 const LoopVectorizeHints *Hints,
975 InterleavedAccessInfo &IAI)
976 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
977 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
978 Hints(Hints), InterleaveInfo(IAI) {}
979
980 /// \return An upper bound for the vectorization factor, or None if
981 /// vectorization and interleaving should be avoided up front.
982 Optional<unsigned> computeMaxVF(unsigned UserVF, unsigned UserIC);
983
984 /// \return True if runtime checks are required for vectorization, and false
985 /// otherwise.
986 bool runtimeChecksRequired();
987
988 /// \return The most profitable vectorization factor and the cost of that VF.
989 /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
990 /// then this vectorization factor will be selected if vectorization is
991 /// possible.
992 VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
993
994 /// Setup cost-based decisions for user vectorization factor.
995 void selectUserVectorizationFactor(unsigned UserVF) {
996 collectUniformsAndScalars(UserVF);
997 collectInstsToScalarize(UserVF);
998 }
999
1000 /// \return The size (in bits) of the smallest and widest types in the code
1001 /// that needs to be vectorized. We ignore values that remain scalar such as
1002 /// 64 bit loop indices.
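/// For example (illustrative), a loop that loads i8 elements and accumulates
/// them into an i32 sum would report the pair (8, 32).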
1003 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1004
1005 /// \return The desired interleave count.
1006 /// If interleave count has been specified by metadata it will be returned.
1007 /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1008 /// are the selected vectorization factor and the cost of the selected VF.
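/// For example (illustrative), with VF = 4 and an interleave count of 2, each
/// iteration of the generated vector loop executes two copies of the widened
/// body and therefore covers 8 iterations of the original scalar loop.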
1009 unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);
1010
1011 /// A memory access instruction may be vectorized in more than one way.
1012 /// The form of the instruction after vectorization depends on cost.
1013 /// This function makes cost-based decisions for Load/Store instructions
1014 /// and collects them in a map. This decision map is used for building
1015 /// the lists of loop-uniform and loop-scalar instructions.
1016 /// The calculated cost is saved with the widening decision in order to
1017 /// avoid redundant calculations.
1018 void setCostBasedWideningDecision(unsigned VF);
1019
1020 /// A struct that represents some properties of the register usage
1021 /// of a loop.
1022 struct RegisterUsage {
1023 /// Holds the number of loop invariant values that are used in the loop.
1024 /// The key is ClassID of target-provided register class.
1025 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1026 /// Holds the maximum number of concurrent live intervals in the loop.
1027 /// The key is ClassID of target-provided register class.
1028 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1029 };
1030
1031 /// \return Returns information about the register usages of the loop for the
1032 /// given vectorization factors.
1033 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1034
1035 /// Collect values we want to ignore in the cost model.
1036 void collectValuesToIgnore();
1037
1038 /// \returns The smallest bitwidth each instruction can be represented with.
1039 /// The vector equivalents of these instructions should be truncated to this
1040 /// type.
1041 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1042 return MinBWs;
1043 }
1044
1045 /// \returns True if it is more profitable to scalarize instruction \p I for
1046 /// vectorization factor \p VF.
1047 bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1048 assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1049
1050 // Cost model is not run in the VPlan-native path - return conservative
1051 // result until this changes.
1052 if (EnableVPlanNativePath)
1053 return false;
1054
1055 auto Scalars = InstsToScalarize.find(VF);
1056 assert(Scalars != InstsToScalarize.end() &&
1057 "VF not yet analyzed for scalarization profitability");
1058 return Scalars->second.find(I) != Scalars->second.end();
1059 }
1060
1061 /// Returns true if \p I is known to be uniform after vectorization.
1062 bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1063 if (VF == 1)
1064 return true;
1065
1066 // Cost model is not run in the VPlan-native path - return conservative
1067 // result until this changes.
1068 if (EnableVPlanNativePath)
1069 return false;
1070
1071 auto UniformsPerVF = Uniforms.find(VF);
1072 assert(UniformsPerVF != Uniforms.end() &&
1073 "VF not yet analyzed for uniformity");
1074 return UniformsPerVF->second.count(I);
1075 }
1076
1077 /// Returns true if \p I is known to be scalar after vectorization.
1078 bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1079 if (VF == 1)
1080 return true;
1081
1082 // Cost model is not run in the VPlan-native path - return conservative
1083 // result until this changes.
1084 if (EnableVPlanNativePath)
1085 return false;
1086
1087 auto ScalarsPerVF = Scalars.find(VF);
1088 assert(ScalarsPerVF != Scalars.end() &&
1089 "Scalar values are not calculated for VF");
1090 return ScalarsPerVF->second.count(I);
1091 }
1092
1093 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1094 /// for vectorization factor \p VF.
1095 bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1096 return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
1097 !isProfitableToScalarize(I, VF) &&
1098 !isScalarAfterVectorization(I, VF);
1099 }
1100
1101 /// Decision that was taken during cost calculation for memory instruction.
1102 enum InstWidening {
1103 CM_Unknown,
1104 CM_Widen, // For consecutive accesses with stride +1.
1105 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1106 CM_Interleave,
1107 CM_GatherScatter,
1108 CM_Scalarize
1109 };
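// For example (illustrative): a unit-stride load is typically CM_Widen, the
// same access iterating backwards is CM_Widen_Reverse, a member of an
// interleave group is CM_Interleave, a strided or indexed access may become
// CM_GatherScatter, and an access that is cheapest to keep scalar (or cannot
// be widened) is CM_Scalarize.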
1110
1111 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1112 /// instruction \p I and vector width \p VF.
1113 void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1114 unsigned Cost) {
1115 assert(VF >= 2 && "Expected VF >=2");
1116 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1117 }
1118
1119 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1120 /// interleaving group \p Grp and vector width \p VF.
1121 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
1122 InstWidening W, unsigned Cost) {
1123 assert(VF >= 2 && "Expected VF >=2");
1124 /// Broadcast this decision to all instructions inside the group.
1125 /// But the cost will be assigned to one instruction only.
1126 for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1127 if (auto *I = Grp->getMember(i)) {
1128 if (Grp->getInsertPos() == I)
1129 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1130 else
1131 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1132 }
1133 }
1134 }
1135
1136 /// Return the cost model decision for the given instruction \p I and vector
1137 /// width \p VF. Return CM_Unknown if this instruction did not pass
1138 /// through the cost modeling.
1139 InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1140 assert(VF >= 2 && "Expected VF >=2");
1141
1142 // Cost model is not run in the VPlan-native path - return conservative
1143 // result until this changes.
1144 if (EnableVPlanNativePath)
1145 return CM_GatherScatter;
1146
1147 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1148 auto Itr = WideningDecisions.find(InstOnVF);
1149 if (Itr == WideningDecisions.end())
1150 return CM_Unknown;
1151 return Itr->second.first;
1152 }
1153
1154 /// Return the vectorization cost for the given instruction \p I and vector
1155 /// width \p VF.
1156 unsigned getWideningCost(Instruction *I, unsigned VF) {
1157 assert(VF >= 2 && "Expected VF >=2");
1158 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1159 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1160 "The cost is not calculated");
1161 return WideningDecisions[InstOnVF].second;
1162 }
1163
1164 /// Return True if instruction \p I is an optimizable truncate whose operand
1165 /// is an induction variable. Such a truncate will be removed by adding a new
1166 /// induction variable with the destination type.
1167 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1168 // If the instruction is not a truncate, return false.
1169 auto *Trunc = dyn_cast<TruncInst>(I);
1170 if (!Trunc)
1171 return false;
1172
1173 // Get the source and destination types of the truncate.
1174 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1175 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1176
1177 // If the truncate is free for the given types, return false. Replacing a
1178 // free truncate with an induction variable would add an induction variable
1179 // update instruction to each iteration of the loop. We exclude from this
1180 // check the primary induction variable since it will need an update
1181 // instruction regardless.
1182 Value *Op = Trunc->getOperand(0);
1183 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1184 return false;
1185
1186 // If the truncated value is not an induction variable, return false.
1187 return Legal->isInductionPhi(Op);
1188 }
1189
1190 /// Collects the instructions to scalarize for each predicated instruction in
1191 /// the loop.
1192 void collectInstsToScalarize(unsigned VF);
1193
1194 /// Collect Uniform and Scalar values for the given \p VF.
1195 /// The sets depend on CM decision for Load/Store instructions
1196 /// that may be vectorized as interleave, gather-scatter or scalarized.
1197 void collectUniformsAndScalars(unsigned VF) {
1198 // Do the analysis once.
1199 if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1200 return;
1201 setCostBasedWideningDecision(VF);
1202 collectLoopUniforms(VF);
1203 collectLoopScalars(VF);
1204 }
1205
1206 /// Returns true if the target machine supports masked store operation
1207 /// for the given \p DataType and kind of access to \p Ptr.
1208 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1209 return Legal->isConsecutivePtr(Ptr) &&
1210 TTI.isLegalMaskedStore(DataType, Alignment);
1211 }
1212
1213 /// Returns true if the target machine supports masked load operation
1214 /// for the given \p DataType and kind of access to \p Ptr.
1215 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1216 return Legal->isConsecutivePtr(Ptr) &&
1217 TTI.isLegalMaskedLoad(DataType, Alignment);
1218 }
1219
1220 /// Returns true if the target machine supports masked scatter operation
1221 /// for the given \p DataType.
1222 bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1223 return TTI.isLegalMaskedScatter(DataType, Alignment);
1224 }
1225
1226 /// Returns true if the target machine supports masked gather operation
1227 /// for the given \p DataType.
1228 bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1229 return TTI.isLegalMaskedGather(DataType, Alignment);
1230 }
1231
1232 /// Returns true if the target machine can represent \p V as a masked gather
1233 /// or scatter operation.
1234 bool isLegalGatherOrScatter(Value *V) {
1235 bool LI = isa<LoadInst>(V);
1236 bool SI = isa<StoreInst>(V);
1237 if (!LI && !SI)
1238 return false;
1239 auto *Ty = getMemInstValueType(V);
1240 Align Align = getLoadStoreAlignment(V);
1241 return (LI && isLegalMaskedGather(Ty, Align)) ||
1242 (SI && isLegalMaskedScatter(Ty, Align));
1243 }
1244
1245 /// Returns true if \p I is an instruction that will be scalarized with
1246 /// predication. Such instructions include conditional stores and
1247 /// instructions that may divide by zero.
1248 /// If a non-zero VF has been calculated, we check if I will be scalarized
1249 /// with predication for that VF.
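/// For example (illustrative), a store executed under a condition in the
/// original loop, or a udiv/sdiv/urem/srem whose divisor might be zero, is
/// emitted per lane inside an if-block rather than as a speculative vector
/// operation.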
1250 bool isScalarWithPredication(Instruction *I, unsigned VF = 1);
1251
1252 // Returns true if \p I is an instruction that will be predicated either
1253 // through scalar predication or masked load/store or masked gather/scatter.
1254 // Superset of instructions that return true for isScalarWithPredication.
1255 bool isPredicatedInst(Instruction *I) {
1256 if (!blockNeedsPredication(I->getParent()))
1257 return false;
1258 // Loads and stores that need some form of masked operation are predicated
1259 // instructions.
1260 if (isa<LoadInst>(I) || isa<StoreInst>(I))
1261 return Legal->isMaskRequired(I);
1262 return isScalarWithPredication(I);
1263 }
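// For example (illustrative source, not from this file): in
//   for (i = 0; i < n; ++i)
//     if (C[i]) A[i] = B[i] / D[i];
// the udiv must not execute speculatively on masked-off lanes (D[i] may be
// zero there), so it is scalarized with predication, while the store can
// instead become a masked store if the target supports it. Both count as
// predicated instructions here.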
1264
1265 /// Returns true if \p I is a memory instruction with consecutive memory
1266 /// access that can be widened.
1267 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1268
1269 /// Returns true if \p I is a memory instruction in an interleaved-group
1270 /// of memory accesses that can be vectorized with wide vector loads/stores
1271 /// and shuffles.
1272 bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);
1273
1274 /// Check if \p Instr belongs to any interleaved access group.
1275 bool isAccessInterleaved(Instruction *Instr) {
1276 return InterleaveInfo.isInterleaved(Instr);
1277 }
1278
1279 /// Get the interleaved access group that \p Instr belongs to.
1280 const InterleaveGroup<Instruction> *
1281 getInterleavedAccessGroup(Instruction *Instr) {
1282 return InterleaveInfo.getInterleaveGroup(Instr);
1283 }
1284
1285 /// Returns true if an interleaved group requires a scalar iteration
1286 /// to handle accesses with gaps, and there is nothing preventing us from
1287 /// creating a scalar epilogue.
1288 bool requiresScalarEpilogue() const {
1289 return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
1290 }
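// Sketch: for an interleave group accessing {A[3*i], A[3*i+1]} with a gap at
// member index 2, the final wide load would read elements past those touched
// by the last scalar iteration, so one iteration is kept in a scalar epilogue
// unless the gap is handled with a masked access instead.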
1291
1292 /// Returns true if a scalar epilogue is not allowed due to optsize or a
1293 /// loop hint annotation.
1294 bool isScalarEpilogueAllowed() const {
1295 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1296 }
1297
1298 /// Returns true if all loop blocks should be masked to fold tail loop.
1299 bool foldTailByMasking() const { return FoldTailByMasking; }
1300
1301 bool blockNeedsPredication(BasicBlock *BB) {
1302 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1303 }
1304
1305 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1306 /// with factor VF. Return the cost of the instruction, including
1307 /// scalarization overhead if it's needed.
1308 unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);
1309
1310 /// Estimate cost of a call instruction CI if it were vectorized with factor
1311 /// VF. Return the cost of the instruction, including scalarization overhead
1312 /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1313 /// scalarized, i.e. either a vector version isn't available, or it is too
1314 /// expensive.
1315 unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);
1316
1317 /// Invalidates decisions already taken by the cost model.
1318 void invalidateCostModelingDecisions() {
1319 WideningDecisions.clear();
1320 Uniforms.clear();
1321 Scalars.clear();
1322 }
1323
1324 private:
1325 unsigned NumPredStores = 0;
1326
1327 /// \return An upper bound for the vectorization factor, a power-of-2 larger
1328 /// than zero. One is returned if vectorization should best be avoided due
1329 /// to cost.
1330 unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
1331
1332 /// The vectorization cost is a combination of the cost itself and a boolean
1333 /// indicating whether any of the contributing operations will actually
1334 /// operate on vector values after type legalization in the backend. If this
1335 /// latter value is false, then all operations will be scalarized (i.e. no
1336 /// vectorization has actually taken place).
1339 using VectorizationCostTy = std::pair<unsigned, bool>;
1340
1341 /// Returns the expected execution cost. The unit of the cost does
1342 /// not matter because we use the 'cost' units to compare different
1343 /// vector widths. The cost that is returned is *not* normalized by
1344 /// the factor width.
1345 VectorizationCostTy expectedCost(unsigned VF);
1346
1347 /// Returns the execution time cost of an instruction for a given vector
1348 /// width. Vector width of one means scalar.
1349 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1350
1351 /// The cost-computation logic from getInstructionCost which provides
1352 /// the vector type as an output parameter.
1353 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1354
1355 /// Calculate vectorization cost of memory instruction \p I.
1356 unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1357
1358 /// The cost computation for scalarized memory instruction.
1359 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1360
1361 /// The cost computation for interleaving group of memory instructions.
1362 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1363
1364 /// The cost computation for Gather/Scatter instruction.
1365 unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1366
1367 /// The cost computation for widening instruction \p I with consecutive
1368 /// memory access.
1369 unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1370
1371 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1372 /// Load: scalar load + broadcast.
1373 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1374 /// element)
1375 unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1376
1377 /// Estimate the overhead of scalarizing an instruction. This is a
1378 /// convenience wrapper for the type-based getScalarizationOverhead API.
1379 unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1380
1381 /// Returns whether the instruction is a load or store and will be emitted
1382 /// as a vector operation.
1383 bool isConsecutiveLoadOrStore(Instruction *I);
1384
1385 /// Returns true if an artificially high cost for emulated masked memrefs
1386 /// should be used.
1387 bool useEmulatedMaskMemRefHack(Instruction *I);
1388
1389 /// Map of scalar integer values to the smallest bitwidth they can be legally
1390 /// represented as. The vector equivalents of these values should be truncated
1391 /// to this type.
1392 MapVector<Instruction *, uint64_t> MinBWs;
1393
1394 /// A type representing the costs for instructions if they were to be
1395 /// scalarized rather than vectorized. The entries are Instruction-Cost
1396 /// pairs.
1397 using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1398
1399 /// A set containing all BasicBlocks that are known to be present after
1400 /// vectorization as predicated blocks.
1401 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1402
1403 /// Records whether it is allowed to have the original scalar loop execute at
1404 /// least once. This may be needed as a fallback loop in case runtime
1405 /// aliasing/dependence checks fail, or to handle the tail/remainder
1406 /// iterations when the trip count is unknown or is not a multiple of the VF,
1407 /// or as a peel-loop to handle gaps in interleave-groups.
1408 /// Under optsize and when the trip count is very small we don't allow any
1409 /// iterations to execute in the scalar loop.
1410 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1411
1412 /// All blocks of loop are to be masked to fold tail of scalar iterations.
1413 bool FoldTailByMasking = false;
1414
1415 /// A map holding scalar costs for different vectorization factors. The
1416 /// presence of a cost for an instruction in the mapping indicates that the
1417 /// instruction will be scalarized when vectorizing with the associated
1418 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1419 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1420
1421 /// Holds the instructions known to be uniform after vectorization.
1422 /// The data is collected per VF.
1423 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1424
1425 /// Holds the instructions known to be scalar after vectorization.
1426 /// The data is collected per VF.
1427 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1428
1429 /// Holds the instructions (address computations) that are forced to be
1430 /// scalarized.
1431 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1432
1433 /// Returns the expected difference in cost from scalarizing the expression
1434 /// feeding a predicated instruction \p PredInst. The instructions to
1435 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1436 /// non-negative return value implies the expression will be scalarized.
1437 /// Currently, only single-use chains are considered for scalarization.
1438 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1439 unsigned VF);
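// Illustrative example: if a predicated store is going to be scalarized
// anyway, scalarizing a single-use add that only feeds that store avoids
// materializing a vector add plus a per-lane extract, so the whole chain can
// be cheaper in scalar form; the returned discount captures that difference.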
1440
1441 /// Collect the instructions that are uniform after vectorization. An
1442 /// instruction is uniform if we represent it with a single scalar value in
1443 /// the vectorized loop corresponding to each vector iteration. Examples of
1444 /// uniform instructions include pointer operands of consecutive or
1445 /// interleaved memory accesses. Note that although uniformity implies an
1446 /// instruction will be scalar, the reverse is not true. In general, a
1447 /// scalarized instruction will be represented by VF scalar values in the
1448 /// vectorized loop, each corresponding to an iteration of the original
1449 /// scalar loop.
1450 void collectLoopUniforms(unsigned VF);
1451
1452 /// Collect the instructions that are scalar after vectorization. An
1453 /// instruction is scalar if it is known to be uniform or will be scalarized
1454 /// during vectorization. Non-uniform scalarized instructions will be
1455 /// represented by VF values in the vectorized loop, each corresponding to an
1456 /// iteration of the original scalar loop.
1457 void collectLoopScalars(unsigned VF);
1458
1459 /// Keeps cost model vectorization decision and cost for instructions.
1460 /// Right now it is used for memory instructions only.
1461 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1462 std::pair<InstWidening, unsigned>>;
1463
1464 DecisionList WideningDecisions;
1465
1466 /// Returns true if \p V is expected to be vectorized and it needs to be
1467 /// extracted.
1468 bool needsExtract(Value *V, unsigned VF) const {
1469 Instruction *I = dyn_cast<Instruction>(V);
1470 if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I))
1471 return false;
1472
1473 // Assume we can vectorize V (and hence we need extraction) if the
1474 // scalars are not computed yet. This can happen because it is called
1475 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1476 // the scalars are collected. That should be a safe assumption in most
1477 // cases, because we check if the operands have vectorizable types
1478 // beforehand in LoopVectorizationLegality.
1479 return Scalars.find(VF) == Scalars.end() ||
1480 !isScalarAfterVectorization(I, VF);
1481 };
1482
1483 /// Returns a range containing only operands needing to be extracted.
1484 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1485 unsigned VF) {
1486 return SmallVector<Value *, 4>(make_filter_range(
1487 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1488 }
1489
1490 public:
1491 /// The loop that we evaluate.
1492 Loop *TheLoop;
1493
1494 /// Predicated scalar evolution analysis.
1495 PredicatedScalarEvolution &PSE;
1496
1497 /// Loop Info analysis.
1498 LoopInfo *LI;
1499
1500 /// Vectorization legality.
1501 LoopVectorizationLegality *Legal;
1502
1503 /// Vector target information.
1504 const TargetTransformInfo &TTI;
1505
1506 /// Target Library Info.
1507 const TargetLibraryInfo *TLI;
1508
1509 /// Demanded bits analysis.
1510 DemandedBits *DB;
1511
1512 /// Assumption cache.
1513 AssumptionCache *AC;
1514
1515 /// Interface to emit optimization remarks.
1516 OptimizationRemarkEmitter *ORE;
1517
1518 const Function *TheFunction;
1519
1520 /// Loop Vectorize Hint.
1521 const LoopVectorizeHints *Hints;
1522
1523 /// The interleave access information contains groups of interleaved accesses
1524 /// with the same stride and close to each other.
1525 InterleavedAccessInfo &InterleaveInfo;
1526
1527 /// Values to ignore in the cost model.
1528 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1529
1530 /// Values to ignore in the cost model when VF > 1.
1531 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1532 };
1533
1534 } // end namespace llvm
1535
1536 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1537 // vectorization. The loop needs to be annotated with #pragma omp simd
1538 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1539 // vector length information is not provided, vectorization is not considered
1540 // explicit. Interleave hints are not allowed either. These limitations will be
1541 // relaxed in the future.
1542 // Please note that we are currently forced to abuse the pragma 'clang
1543 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1544 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1545 // provides *explicit vectorization hints* (LV can bypass legal checks and
1546 // assume that vectorization is legal). However, both hints are implemented
1547 // using the same metadata (llvm.loop.vectorize, processed by
1548 // LoopVectorizeHints). This will be fixed in the future when the native IR
1549 // representation for pragma 'omp simd' is introduced.
1550 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1551 OptimizationRemarkEmitter *ORE) {
1552 assert(!OuterLp->empty() && "This is not an outer loop");
1553 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1554
1555 // Only outer loops with an explicit vectorization hint are supported.
1556 // Unannotated outer loops are ignored.
1557 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1558 return false;
1559
1560 Function *Fn = OuterLp->getHeader()->getParent();
1561 if (!Hints.allowVectorization(Fn, OuterLp,
1562 true /*VectorizeOnlyWhenForced*/)) {
1563 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1564 return false;
1565 }
1566
1567 if (Hints.getInterleave() > 1) {
1568 // TODO: Interleave support is future work.
1569 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1570 "outer loops.\n");
1571 Hints.emitRemarkWithHints();
1572 return false;
1573 }
1574
1575 return true;
1576 }
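// An outer loop that qualifies would look like this (illustrative source,
// assuming the VPlan-native path is enabled):
//   #pragma omp simd simdlen(4)
//   for (int i = 0; i < n; ++i)     // annotated outer loop
//     for (int j = 0; j < m; ++j)
//       A[i][j] += B[i][j];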
1577
1578 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1579 OptimizationRemarkEmitter *ORE,
1580 SmallVectorImpl<Loop *> &V) {
1581 // Collect inner loops and outer loops without irreducible control flow. For
1582 // now, only collect outer loops that have explicit vectorization hints. If we
1583 // are stress testing the VPlan H-CFG construction, we collect the outermost
1584 // loop of every loop nest.
1585 if (L.empty() || VPlanBuildStressTest ||
1586 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1587 LoopBlocksRPO RPOT(&L);
1588 RPOT.perform(LI);
1589 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1590 V.push_back(&L);
1591 // TODO: Collect inner loops inside marked outer loops in case
1592 // vectorization fails for the outer loop. Do not invoke
1593 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1594 // already known to be reducible. We can use an inherited attribute for
1595 // that.
1596 return;
1597 }
1598 }
1599 for (Loop *InnerL : L)
1600 collectSupportedLoops(*InnerL, LI, ORE, V);
1601 }
1602
1603 namespace {
1604
1605 /// The LoopVectorize Pass.
1606 struct LoopVectorize : public FunctionPass {
1607 /// Pass identification, replacement for typeid
1608 static char ID;
1609
1610 LoopVectorizePass Impl;
1611
1612 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1613 bool VectorizeOnlyWhenForced = false)
1614 : FunctionPass(ID),
1615 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1616 initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1617 }
1618
1619 bool runOnFunction(Function &F) override {
1620 if (skipFunction(F))
1621 return false;
1622
1623 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1624 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1625 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1626 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1627 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1628 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1629 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1630 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1631 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1632 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1633 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1634 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1635 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1636
1637 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1638 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1639
1640 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1641 GetLAA, *ORE, PSI).MadeAnyChange;
1642 }
1643
1644 void getAnalysisUsage(AnalysisUsage &AU) const override {
1645 AU.addRequired<AssumptionCacheTracker>();
1646 AU.addRequired<BlockFrequencyInfoWrapperPass>();
1647 AU.addRequired<DominatorTreeWrapperPass>();
1648 AU.addRequired<LoopInfoWrapperPass>();
1649 AU.addRequired<ScalarEvolutionWrapperPass>();
1650 AU.addRequired<TargetTransformInfoWrapperPass>();
1651 AU.addRequired<AAResultsWrapperPass>();
1652 AU.addRequired<LoopAccessLegacyAnalysis>();
1653 AU.addRequired<DemandedBitsWrapperPass>();
1654 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1655 AU.addRequired<InjectTLIMappingsLegacy>();
1656
1657 // We currently do not preserve LoopInfo/DominatorTree analyses with outer loop
1658 // vectorization. Until this is addressed, mark these analyses as preserved
1659 // only for non-VPlan-native path.
1660 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1661 if (!EnableVPlanNativePath) {
1662 AU.addPreserved<LoopInfoWrapperPass>();
1663 AU.addPreserved<DominatorTreeWrapperPass>();
1664 }
1665
1666 AU.addPreserved<BasicAAWrapperPass>();
1667 AU.addPreserved<GlobalsAAWrapperPass>();
1668 AU.addRequired<ProfileSummaryInfoWrapperPass>();
1669 }
1670 };
1671
1672 } // end anonymous namespace
1673
1674 //===----------------------------------------------------------------------===//
1675 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1676 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1677 //===----------------------------------------------------------------------===//
1678
1679 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1680 // We need to place the broadcast of invariant variables outside the loop,
1681 // but only if it's proven safe to do so. Otherwise, the broadcast will be
1682 // inside the vector loop body.
1683 Instruction *Instr = dyn_cast<Instruction>(V);
1684 bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1685 (!Instr ||
1686 DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1687 // Place the code for broadcasting invariant variables in the new preheader.
1688 IRBuilder<>::InsertPointGuard Guard(Builder);
1689 if (SafeToHoist)
1690 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1691
1692 // Broadcast the scalar into all locations in the vector.
1693 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1694
1695 return Shuf;
1696 }
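// The splat created above expands to IR like this for VF = 4 (illustrative,
// assuming an i32 scalar %x):
//   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %x, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> undef, <4 x i32> zeroinitializer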
1697
1698 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1699 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1700 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1701 "Expected either an induction phi-node or a truncate of it!");
1702 Value *Start = II.getStartValue();
1703
1704 // Construct the initial value of the vector IV in the vector loop preheader
1705 auto CurrIP = Builder.saveIP();
1706 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1707 if (isa<TruncInst>(EntryVal)) {
1708 assert(Start->getType()->isIntegerTy() &&
1709 "Truncation requires an integer type");
1710 auto *TruncType = cast<IntegerType>(EntryVal->getType());
1711 Step = Builder.CreateTrunc(Step, TruncType);
1712 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1713 }
1714 Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1715 Value *SteppedStart =
1716 getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1717
1718 // We create vector phi nodes for both integer and floating-point induction
1719 // variables. Here, we determine the kind of arithmetic we will perform.
1720 Instruction::BinaryOps AddOp;
1721 Instruction::BinaryOps MulOp;
1722 if (Step->getType()->isIntegerTy()) {
1723 AddOp = Instruction::Add;
1724 MulOp = Instruction::Mul;
1725 } else {
1726 AddOp = II.getInductionOpcode();
1727 MulOp = Instruction::FMul;
1728 }
1729
1730 // Multiply the vectorization factor by the step using integer or
1731 // floating-point arithmetic as appropriate.
1732 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1733 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1734
1735 // Create a vector splat to use in the induction update.
1736 //
1737 // FIXME: If the step is non-constant, we create the vector splat with
1738 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1739 // handle a constant vector splat.
1740 Value *SplatVF =
1741 isa<Constant>(Mul)
1742 ? ConstantVector::getSplat({VF, false}, cast<Constant>(Mul))
1743 : Builder.CreateVectorSplat(VF, Mul);
1744 Builder.restoreIP(CurrIP);
1745
1746 // We may need to add the step a number of times, depending on the unroll
1747 // factor. The last of those goes into the PHI.
1748 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1749 &*LoopVectorBody->getFirstInsertionPt());
1750 VecInd->setDebugLoc(EntryVal->getDebugLoc());
1751 Instruction *LastInduction = VecInd;
1752 for (unsigned Part = 0; Part < UF; ++Part) {
1753 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1754
1755 if (isa<TruncInst>(EntryVal))
1756 addMetadata(LastInduction, EntryVal);
1757 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1758
1759 LastInduction = cast<Instruction>(addFastMathFlag(
1760 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1761 LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1762 }
1763
1764 // Move the last step to the end of the latch block. This ensures consistent
1765 // placement of all induction updates.
1766 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1767 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1768 auto *ICmp = cast<Instruction>(Br->getCondition());
1769 LastInduction->moveBefore(ICmp);
1770 LastInduction->setName("vec.ind.next");
1771
1772 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1773 VecInd->addIncoming(LastInduction, LoopVectorLatch);
1774 }
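// Illustrative IR for an i32 IV starting at 0 with step 1, VF = 4 and UF = 1
// (value names are examples only):
//   vector.body:
//     %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ],
//                              [ %vec.ind.next, %vector.body ]
//     ...
//     %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>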
1775
1776 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1777 return Cost->isScalarAfterVectorization(I, VF) ||
1778 Cost->isProfitableToScalarize(I, VF);
1779 }
1780
1781 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1782 if (shouldScalarizeInstruction(IV))
1783 return true;
1784 auto isScalarInst = [&](User *U) -> bool {
1785 auto *I = cast<Instruction>(U);
1786 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1787 };
1788 return llvm::any_of(IV->users(), isScalarInst);
1789 }
1790
1791 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1792 const InductionDescriptor &ID, const Instruction *EntryVal,
1793 Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1794 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1795 "Expected either an induction phi-node or a truncate of it!");
1796
1797 // This induction variable is not the phi from the original loop but the
1798 // newly-created IV based on the proof that the casted Phi is equal to the
1799 // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
1800 // re-uses the same InductionDescriptor that the original IV uses, but we
1801 // don't have to do any recording in this case - that is done when the
1802 // original IV is processed.
1803 if (isa<TruncInst>(EntryVal))
1804 return;
1805
1806 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1807 if (Casts.empty())
1808 return;
1809 // Only the first Cast instruction in the Casts vector is of interest.
1810 // The rest of the Casts (if any exist) have no uses outside the
1811 // induction update chain itself.
1812 Instruction *CastInst = *Casts.begin();
1813 if (Lane < UINT_MAX)
1814 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1815 else
1816 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1817 }
1818
1819 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1820 assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1821 "Primary induction variable must have an integer type");
1822
1823 auto II = Legal->getInductionVars().find(IV);
1824 assert(II != Legal->getInductionVars().end() && "IV is not an induction");
1825
1826 auto ID = II->second;
1827 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1828
1829 // The value from the original loop to which we are mapping the new induction
1830 // variable.
1831 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1832
1833 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1834
1835 // Generate code for the induction step. Note that induction steps are
1836 // required to be loop-invariant.
1837 auto CreateStepValue = [&](const SCEV *Step) -> Value * {
1838 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
1839 "Induction step should be loop invariant");
1840 if (PSE.getSE()->isSCEVable(IV->getType())) {
1841 SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1842 return Exp.expandCodeFor(Step, Step->getType(),
1843 LoopVectorPreHeader->getTerminator());
1844 }
1845 return cast<SCEVUnknown>(Step)->getValue();
1846 };
1847
1848 // The scalar value to broadcast. This is derived from the canonical
1849 // induction variable. If a truncation type is given, truncate the canonical
1850 // induction variable and step. Otherwise, derive these values from the
1851 // induction descriptor.
1852 auto CreateScalarIV = [&](Value *&Step) -> Value * {
1853 Value *ScalarIV = Induction;
1854 if (IV != OldInduction) {
1855 ScalarIV = IV->getType()->isIntegerTy()
1856 ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1857 : Builder.CreateCast(Instruction::SIToFP, Induction,
1858 IV->getType());
1859 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1860 ScalarIV->setName("offset.idx");
1861 }
1862 if (Trunc) {
1863 auto *TruncType = cast<IntegerType>(Trunc->getType());
1864 assert(Step->getType()->isIntegerTy() &&
1865 "Truncation requires an integer step");
1866 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1867 Step = Builder.CreateTrunc(Step, TruncType);
1868 }
1869 return ScalarIV;
1870 };
1871
1872 // Create the vector values from the scalar IV, in the absence of creating a
1873 // vector IV.
1874 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
1875 Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1876 for (unsigned Part = 0; Part < UF; ++Part) {
1877 Value *EntryPart =
1878 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1879 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1880 if (Trunc)
1881 addMetadata(EntryPart, Trunc);
1882 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1883 }
1884 };
1885
1886 // Now do the actual transformations, and start with creating the step value.
1887 Value *Step = CreateStepValue(ID.getStep());
1888 if (VF <= 1) {
1889 Value *ScalarIV = CreateScalarIV(Step);
1890 CreateSplatIV(ScalarIV, Step);
1891 return;
1892 }
1893
1894 // Determine if we want a scalar version of the induction variable. This is
1895 // true if the induction variable itself is not widened, or if it has at
1896 // least one user in the loop that is not widened.
1897 auto NeedsScalarIV = needsScalarInduction(EntryVal);
1898 if (!NeedsScalarIV) {
1899 createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1900 return;
1901 }
1902
1903 // Try to create a new independent vector induction variable. If we can't
1904 // create the phi node, we will splat the scalar induction variable in each
1905 // loop iteration.
1906 if (!shouldScalarizeInstruction(EntryVal)) {
1907 createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1908 Value *ScalarIV = CreateScalarIV(Step);
1909 // Create scalar steps that can be used by instructions we will later
1910 // scalarize. Note that the addition of the scalar steps will not increase
1911 // the number of instructions in the loop in the common case prior to
1912 // InstCombine. We will be trading one vector extract for each scalar step.
1913 buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1914 return;
1915 }
1916
1917 // All IV users are scalar instructions, so only emit a scalar IV, not a
1918 // vectorized IV, except when we tail-fold: then the splat IV feeds the
1919 // predicate used by the masked loads/stores.
1920 Value *ScalarIV = CreateScalarIV(Step);
1921 if (!Cost->isScalarEpilogueAllowed())
1922 CreateSplatIV(ScalarIV, Step);
1923 buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1924 }
1925
1926 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1927 Instruction::BinaryOps BinOp) {
1928 // Create and check the types.
1929 auto *ValVTy = cast<VectorType>(Val->getType());
1930 int VLen = ValVTy->getNumElements();
1931
1932 Type *STy = Val->getType()->getScalarType();
1933 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1934 "Induction Step must be an integer or FP");
1935 assert(Step->getType() == STy && "Step has wrong type");
1936
1937 SmallVector<Constant *, 8> Indices;
1938
1939 if (STy->isIntegerTy()) {
1940 // Create a vector of consecutive numbers from zero to VF.
1941 for (int i = 0; i < VLen; ++i)
1942 Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1943
1944 // Add the consecutive indices to the vector value.
1945 Constant *Cv = ConstantVector::get(Indices);
1946 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1947 Step = Builder.CreateVectorSplat(VLen, Step);
1948 assert(Step->getType() == Val->getType() && "Invalid step vec");
1949 // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1950 // which can be found from the original scalar operations.
1951 Step = Builder.CreateMul(Cv, Step);
1952 return Builder.CreateAdd(Val, Step, "induction");
1953 }
1954
1955 // Floating point induction.
1956 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1957 "Binary Opcode should be specified for FP induction");
1958 // Create a vector of consecutive numbers from zero to VF.
1959 for (int i = 0; i < VLen; ++i)
1960 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
1961
1962 // Add the consecutive indices to the vector value.
1963 Constant *Cv = ConstantVector::get(Indices);
1964
1965 Step = Builder.CreateVectorSplat(VLen, Step);
1966
1967 // Floating point operations had to be 'fast' to enable the induction.
1968 FastMathFlags Flags;
1969 Flags.setFast();
1970
1971 Value *MulOp = Builder.CreateFMul(Cv, Step);
1972 if (isa<Instruction>(MulOp))
1973 // Have to check, MulOp may be a constant
1974 cast<Instruction>(MulOp)->setFastMathFlags(Flags);
1975
1976 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1977 if (isa<Instruction>(BOp))
1978 cast<Instruction>(BOp)->setFastMathFlags(Flags);
1979 return BOp;
1980 }
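// E.g. for an integer induction with Val = splat(%start), StartIdx = 0,
// Step = %s and VF = 4, this emits roughly (illustrative):
//   %step.splat = ...splat of %s...
//   %0 = mul <4 x i32> <i32 0, i32 1, i32 2, i32 3>, %step.splat
//   %induction = add <4 x i32> %start.splat, %0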
1981
1982 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
1983 Instruction *EntryVal,
1984 const InductionDescriptor &ID) {
1985 // We shouldn't have to build scalar steps if we aren't vectorizing.
1986 assert(VF > 1 && "VF should be greater than one");
1987
1988 // Get the value type and ensure it and the step have the same integer type.
1989 Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
1990 assert(ScalarIVTy == Step->getType() &&
1991 "Val and Step should have the same type");
1992
1993 // We build scalar steps for both integer and floating-point induction
1994 // variables. Here, we determine the kind of arithmetic we will perform.
1995 Instruction::BinaryOps AddOp;
1996 Instruction::BinaryOps MulOp;
1997 if (ScalarIVTy->isIntegerTy()) {
1998 AddOp = Instruction::Add;
1999 MulOp = Instruction::Mul;
2000 } else {
2001 AddOp = ID.getInductionOpcode();
2002 MulOp = Instruction::FMul;
2003 }
2004
2005 // Determine the number of scalars we need to generate for each unroll
2006 // iteration. If EntryVal is uniform, we only need to generate the first
2007 // lane. Otherwise, we generate all VF values.
2008 unsigned Lanes =
2009 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2010 : VF;
2011 // Compute the scalar steps and save the results in VectorLoopValueMap.
2012 for (unsigned Part = 0; Part < UF; ++Part) {
2013 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2014 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2015 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2016 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2017 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2018 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2019 }
2020 }
2021 }
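// For VF = 4, UF = 2 and a non-uniform EntryVal, this records the eight
// scalars ScalarIV + k * Step for k = 0..7, one per (Part, Lane) pair in
// VectorLoopValueMap (sketch of the resulting values).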
2022
2023 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2024 assert(V != Induction && "The new induction variable should not be used.");
2025 assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2026 assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2027
2028 // If we have a stride that is replaced by one, do it here. Defer this for
2029 // the VPlan-native path until we start running Legal checks in that path.
2030 if (!EnableVPlanNativePath && Legal->hasStride(V))
2031 V = ConstantInt::get(V->getType(), 1);
2032
2033 // If we have a vector mapped to this value, return it.
2034 if (VectorLoopValueMap.hasVectorValue(V, Part))
2035 return VectorLoopValueMap.getVectorValue(V, Part);
2036
2037 // If the value has not been vectorized, check if it has been scalarized
2038 // instead. If it has been scalarized, and we actually need the value in
2039 // vector form, we will construct the vector values on demand.
2040 if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2041 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2042
2043 // If we've scalarized a value, that value should be an instruction.
2044 auto *I = cast<Instruction>(V);
2045
2046 // If we aren't vectorizing, we can just copy the scalar map values over to
2047 // the vector map.
2048 if (VF == 1) {
2049 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2050 return ScalarValue;
2051 }
2052
2053 // Get the last scalar instruction we generated for V and Part. If the value
2054 // is known to be uniform after vectorization, this corresponds to lane zero
2055 // of the Part unroll iteration. Otherwise, the last instruction is the one
2056 // we created for the last vector lane of the Part unroll iteration.
2057 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2058 auto *LastInst = cast<Instruction>(
2059 VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2060
2061 // Set the insert point after the last scalarized instruction. This ensures
2062 // the insertelement sequence will directly follow the scalar definitions.
2063 auto OldIP = Builder.saveIP();
2064 auto NewIP = std::next(BasicBlock::iterator(LastInst));
2065 Builder.SetInsertPoint(&*NewIP);
2066
2067 // However, if we are vectorizing, we need to construct the vector values.
2068 // If the value is known to be uniform after vectorization, we can just
2069 // broadcast the scalar value corresponding to lane zero for each unroll
2070 // iteration. Otherwise, we construct the vector values using insertelement
2071 // instructions. Since the resulting vectors are stored in
2072 // VectorLoopValueMap, we will only generate the insertelements once.
2073 Value *VectorValue = nullptr;
2074 if (Cost->isUniformAfterVectorization(I, VF)) {
2075 VectorValue = getBroadcastInstrs(ScalarValue);
2076 VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2077 } else {
2078 // Initialize packing with insertelements to start from undef.
2079 Value *Undef = UndefValue::get(FixedVectorType::get(V->getType(), VF));
2080 VectorLoopValueMap.setVectorValue(V, Part, Undef);
2081 for (unsigned Lane = 0; Lane < VF; ++Lane)
2082 packScalarIntoVectorValue(V, {Part, Lane});
2083 VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2084 }
2085 Builder.restoreIP(OldIP);
2086 return VectorValue;
2087 }
2088
2089 // If this scalar is unknown, assume that it is a constant or that it is
2090 // loop invariant. Broadcast V and save the value for future uses.
2091 Value *B = getBroadcastInstrs(V);
2092 VectorLoopValueMap.setVectorValue(V, Part, B);
2093 return B;
2094 }
2095
2096 Value *
2097 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2098 const VPIteration &Instance) {
2099 // If the value is not an instruction contained in the loop, it should
2100 // already be scalar.
2101 if (OrigLoop->isLoopInvariant(V))
2102 return V;
2103
2104 assert(Instance.Lane > 0
2105 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
2106 : true && "Uniform values only have lane zero");
2107
2108 // If the value from the original loop has not been vectorized, it is
2109 // represented by UF x VF scalar values in the new loop. Return the requested
2110 // scalar value.
2111 if (VectorLoopValueMap.hasScalarValue(V, Instance))
2112 return VectorLoopValueMap.getScalarValue(V, Instance);
2113
2114 // If the value has not been scalarized, get its entry in VectorLoopValueMap
2115 // for the given unroll part. If this entry is not a vector type (i.e., the
2116 // vectorization factor is one), there is no need to generate an
2117 // extractelement instruction.
2118 auto *U = getOrCreateVectorValue(V, Instance.Part);
2119 if (!U->getType()->isVectorTy()) {
2120 assert(VF == 1 && "Value not scalarized has non-vector type");
2121 return U;
2122 }
2123
2124 // Otherwise, the value from the original loop has been vectorized and is
2125 // represented by UF vector values. Extract and return the requested scalar
2126 // value from the appropriate vector lane.
2127 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2128 }
2129
2130 void InnerLoopVectorizer::packScalarIntoVectorValue(
2131 Value *V, const VPIteration &Instance) {
2132 assert(V != Induction && "The new induction variable should not be used.");
2133 assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2134 assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2135
2136 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2137 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2138 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2139 Builder.getInt32(Instance.Lane));
2140 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2141 }
2142
2143 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2144 assert(Vec->getType()->isVectorTy() && "Invalid type");
2145 SmallVector<int, 8> ShuffleMask;
2146 for (unsigned i = 0; i < VF; ++i)
2147 ShuffleMask.push_back(VF - i - 1);
2148
2149 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2150 ShuffleMask, "reverse");
2151 }
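// E.g. for VF = 4 the mask built above is <3, 2, 1, 0>, so the shuffle
// returns the input vector with its lanes in reverse order.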
2152
2153 // Return whether we allow using masked interleave-groups (for dealing with
2154 // strided loads/stores that reside in predicated blocks, or for dealing
2155 // with gaps).
2156 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2157 // If an override option has been passed in for interleaved accesses, use it.
2158 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2159 return EnableMaskedInterleavedMemAccesses;
2160
2161 return TTI.enableMaskedInterleavedAccessVectorization();
2162 }
2163
2164 // Try to vectorize the interleave group that \p Instr belongs to.
2165 //
2166 // E.g. Translate following interleaved load group (factor = 3):
2167 // for (i = 0; i < N; i+=3) {
2168 // R = Pic[i]; // Member of index 0
2169 // G = Pic[i+1]; // Member of index 1
2170 // B = Pic[i+2]; // Member of index 2
2171 // ... // do something to R, G, B
2172 // }
2173 // To:
2174 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2175 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
2176 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
2177 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
2178 //
2179 // Or translate following interleaved store group (factor = 3):
2180 // for (i = 0; i < N; i+=3) {
2181 // ... do something to R, G, B
2182 // Pic[i] = R; // Member of index 0
2183 // Pic[i+1] = G; // Member of index 1
2184 // Pic[i+2] = B; // Member of index 2
2185 // }
2186 // To:
2187 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2188 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2189 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2190 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2191 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2192 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2193 const InterleaveGroup<Instruction> *Group, VPTransformState &State,
2194 VPValue *Addr, VPValue *BlockInMask) {
2195 Instruction *Instr = Group->getInsertPos();
2196 const DataLayout &DL = Instr->getModule()->getDataLayout();
2197
2198 // Prepare for the vector type of the interleaved load/store.
2199 Type *ScalarTy = getMemInstValueType(Instr);
2200 unsigned InterleaveFactor = Group->getFactor();
2201 auto *VecTy = FixedVectorType::get(ScalarTy, InterleaveFactor * VF);
2202
2203 // Prepare for the new pointers.
2204 SmallVector<Value *, 2> AddrParts;
2205 unsigned Index = Group->getIndex(Instr);
2206
2207 // TODO: extend the masked interleaved-group support to reversed access.
2208 assert((!BlockInMask || !Group->isReverse()) &&
2209 "Reversed masked interleave-group not supported.");
2210
2211 // If the group is reverse, adjust the index to refer to the last vector lane
2212 // instead of the first. We adjust the index from the first vector lane,
2213 // rather than directly getting the pointer for lane VF - 1, because the
2214 // pointer operand of the interleaved access is supposed to be uniform. For
2215 // uniform instructions, we're only required to generate a value for the
2216 // first vector lane in each unroll iteration.
2217 if (Group->isReverse())
2218 Index += (VF - 1) * Group->getFactor();
2219
2220 for (unsigned Part = 0; Part < UF; Part++) {
2221 Value *AddrPart = State.get(Addr, {Part, 0});
2222 setDebugLocFromInst(Builder, AddrPart);
2223
2224 // Note that the current instruction could be at any member index. We need to
2225 // adjust the address to the member of index 0.
2226 //
2227 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2228 // b = A[i]; // Member of index 0
2229 // The current pointer points to A[i+1]; adjust it to A[i].
2230 //
2231 // E.g. A[i+1] = a; // Member of index 1
2232 // A[i] = b; // Member of index 0
2233 // A[i+2] = c; // Member of index 2 (Current instruction)
2234 // The current pointer points to A[i+2]; adjust it to A[i].
2235
2236 bool InBounds = false;
2237 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2238 InBounds = gep->isInBounds();
2239 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2240 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2241
2242 // Cast to the vector pointer type.
2243 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2244 Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2245 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2246 }
2247
2248 setDebugLocFromInst(Builder, Instr);
2249 Value *UndefVec = UndefValue::get(VecTy);
2250
2251 Value *MaskForGaps = nullptr;
2252 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2253 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2254 assert(MaskForGaps && "Mask for Gaps is required but it is null");
2255 }
2256
2257 // Vectorize the interleaved load group.
2258 if (isa<LoadInst>(Instr)) {
2259 // For each unroll part, create a wide load for the group.
2260 SmallVector<Value *, 2> NewLoads;
2261 for (unsigned Part = 0; Part < UF; Part++) {
2262 Instruction *NewLoad;
2263 if (BlockInMask || MaskForGaps) {
2264 assert(useMaskedInterleavedAccesses(*TTI) &&
2265 "masked interleaved groups are not allowed.");
2266 Value *GroupMask = MaskForGaps;
2267 if (BlockInMask) {
2268 Value *BlockInMaskPart = State.get(BlockInMask, Part);
2269 auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2270 Value *ShuffledMask = Builder.CreateShuffleVector(
2271 BlockInMaskPart, Undefs,
2272 createReplicatedMask(InterleaveFactor, VF), "interleaved.mask");
2273 GroupMask = MaskForGaps
2274 ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2275 MaskForGaps)
2276 : ShuffledMask;
2277 }
2278 NewLoad =
2279 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2280 GroupMask, UndefVec, "wide.masked.vec");
2281 }
2282 else
2283 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2284 Group->getAlign(), "wide.vec");
2285 Group->addMetadata(NewLoad);
2286 NewLoads.push_back(NewLoad);
2287 }
2288
2289 // For each member in the group, shuffle out the appropriate data from the
2290 // wide loads.
2291 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2292 Instruction *Member = Group->getMember(I);
2293
2294 // Skip the gaps in the group.
2295 if (!Member)
2296 continue;
2297
2298 auto StrideMask = createStrideMask(I, InterleaveFactor, VF);
2299 for (unsigned Part = 0; Part < UF; Part++) {
2300 Value *StridedVec = Builder.CreateShuffleVector(
2301 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2302
2303 // If this member has a different type, cast the result to its vector type.
2304 if (Member->getType() != ScalarTy) {
2305 VectorType *OtherVTy = FixedVectorType::get(Member->getType(), VF);
2306 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2307 }
2308
2309 if (Group->isReverse())
2310 StridedVec = reverseVector(StridedVec);
2311
2312 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2313 }
2314 }
2315 return;
2316 }
2317
2318 // The sub-vector type for the current instruction.
2319 auto *SubVT = FixedVectorType::get(ScalarTy, VF);
2320
2321 // Vectorize the interleaved store group.
2322 for (unsigned Part = 0; Part < UF; Part++) {
2323 // Collect the stored vector from each member.
2324 SmallVector<Value *, 4> StoredVecs;
2325 for (unsigned i = 0; i < InterleaveFactor; i++) {
2326 // An interleaved store group doesn't allow gaps, so each index has a member.
2327 Instruction *Member = Group->getMember(i);
2328 assert(Member && "Fail to get a member from an interleaved store group");
2329
2330 Value *StoredVec = getOrCreateVectorValue(
2331 cast<StoreInst>(Member)->getValueOperand(), Part);
2332 if (Group->isReverse())
2333 StoredVec = reverseVector(StoredVec);
2334
2335 // If this member has a different type, cast it to the unified type.
2336
2337 if (StoredVec->getType() != SubVT)
2338 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2339
2340 StoredVecs.push_back(StoredVec);
2341 }
2342
2343 // Concatenate all vectors into a wide vector.
2344 Value *WideVec = concatenateVectors(Builder, StoredVecs);
2345
2346 // Interleave the elements in the wide vector.
2347 Value *IVec = Builder.CreateShuffleVector(
2348 WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor),
2349 "interleaved.vec");
2350
2351 Instruction *NewStoreInstr;
2352 if (BlockInMask) {
2353 Value *BlockInMaskPart = State.get(BlockInMask, Part);
2354 auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2355 Value *ShuffledMask = Builder.CreateShuffleVector(
2356 BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF),
2357 "interleaved.mask");
2358 NewStoreInstr = Builder.CreateMaskedStore(
2359 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2360 }
2361 else
2362 NewStoreInstr =
2363 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2364
2365 Group->addMetadata(NewStoreInstr);
2366 }
2367 }
2368
2369 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2370 VPTransformState &State,
2371 VPValue *Addr,
2372 VPValue *StoredValue,
2373 VPValue *BlockInMask) {
2374 // Attempt to issue a wide load.
2375 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2376 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2377
2378 assert((LI || SI) && "Invalid Load/Store instruction");
2379 assert((!SI || StoredValue) && "No stored value provided for widened store");
2380 assert((!LI || !StoredValue) && "Stored value provided for widened load");
2381
2382 LoopVectorizationCostModel::InstWidening Decision =
2383 Cost->getWideningDecision(Instr, VF);
2384 assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2385 Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2386 Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2387 "CM decision is not to widen the memory instruction");
2388
2389 Type *ScalarDataTy = getMemInstValueType(Instr);
2390 auto *DataTy = FixedVectorType::get(ScalarDataTy, VF);
2391 const Align Alignment = getLoadStoreAlignment(Instr);
2392
2393 // Determine if the pointer operand of the access is either consecutive or
2394 // reverse consecutive.
2395 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2396 bool ConsecutiveStride =
2397 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2398 bool CreateGatherScatter =
2399 (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2400
2401 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2402 // gather/scatter. Otherwise Decision should have been to Scalarize.
2403 assert((ConsecutiveStride || CreateGatherScatter) &&
2404 "The instruction should be scalarized");
2405 (void)ConsecutiveStride;
2406
2407 VectorParts BlockInMaskParts(UF);
2408 bool isMaskRequired = BlockInMask;
2409 if (isMaskRequired)
2410 for (unsigned Part = 0; Part < UF; ++Part)
2411 BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2412
2413 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2414 // Calculate the pointer for the specific unroll-part.
2415 GetElementPtrInst *PartPtr = nullptr;
2416
2417 bool InBounds = false;
2418 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2419 InBounds = gep->isInBounds();
2420
2421 if (Reverse) {
2422 // If the address is consecutive but reversed, then the
2423 // wide store needs to start at the last vector element.
2424 PartPtr = cast<GetElementPtrInst>(
2425 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF)));
2426 PartPtr->setIsInBounds(InBounds);
2427 PartPtr = cast<GetElementPtrInst>(
2428 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF)));
2429 PartPtr->setIsInBounds(InBounds);
2430 if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2431 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2432 } else {
2433 PartPtr = cast<GetElementPtrInst>(
2434 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF)));
2435 PartPtr->setIsInBounds(InBounds);
2436 }
2437
2438 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2439 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2440 };
2441
2442 // Handle Stores:
2443 if (SI) {
2444 setDebugLocFromInst(Builder, SI);
2445
2446 for (unsigned Part = 0; Part < UF; ++Part) {
2447 Instruction *NewSI = nullptr;
2448 Value *StoredVal = State.get(StoredValue, Part);
2449 if (CreateGatherScatter) {
2450 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2451 Value *VectorGep = State.get(Addr, Part);
2452 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2453 MaskPart);
2454 } else {
2455 if (Reverse) {
2456 // If we store to reverse consecutive memory locations, then we need
2457 // to reverse the order of elements in the stored value.
2458 StoredVal = reverseVector(StoredVal);
2459 // We don't want to update the value in the map as it might be used in
2460 // another expression. So don't call resetVectorValue(StoredVal).
2461 }
2462 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2463 if (isMaskRequired)
2464 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2465 BlockInMaskParts[Part]);
2466 else
2467 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2468 }
2469 addMetadata(NewSI, SI);
2470 }
2471 return;
2472 }
2473
2474 // Handle loads.
2475 assert(LI && "Must have a load instruction");
2476 setDebugLocFromInst(Builder, LI);
2477 for (unsigned Part = 0; Part < UF; ++Part) {
2478 Value *NewLI;
2479 if (CreateGatherScatter) {
2480 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2481 Value *VectorGep = State.get(Addr, Part);
2482 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2483 nullptr, "wide.masked.gather");
2484 addMetadata(NewLI, LI);
2485 } else {
2486 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2487 if (isMaskRequired)
2488 NewLI = Builder.CreateMaskedLoad(
2489 VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
2490 "wide.masked.load");
2491 else
2492 NewLI =
2493 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2494
2495 // Add metadata to the load, but setVectorValue to the reverse shuffle.
2496 addMetadata(NewLI, LI);
2497 if (Reverse)
2498 NewLI = reverseVector(NewLI);
2499 }
2500 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2501 }
2502 }
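// Sketch for a reverse consecutive load with VF = 4: part 0 is loaded from
// Ptr - 3 .. Ptr (via the GEPs with offsets -Part * VF and 1 - VF above), and
// the loaded vector is then flipped with reverseVector so that lane 0 again
// corresponds to the element the current scalar iteration would have read.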
2503
2504 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
2505 const VPIteration &Instance,
2506 bool IfPredicateInstr,
2507 VPTransformState &State) {
2508 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2509
2510 setDebugLocFromInst(Builder, Instr);
2511
2512 // Does this instruction return a value?
2513 bool IsVoidRetTy = Instr->getType()->isVoidTy();
2514
2515 Instruction *Cloned = Instr->clone();
2516 if (!IsVoidRetTy)
2517 Cloned->setName(Instr->getName() + ".cloned");
2518
2519 // Replace the operands of the cloned instructions with their scalar
2520 // equivalents in the new loop.
2521 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2522 auto *NewOp = State.get(User.getOperand(op), Instance);
2523 Cloned->setOperand(op, NewOp);
2524 }
2525 addNewMetadata(Cloned, Instr);
2526
2527 // Place the cloned scalar in the new loop.
2528 Builder.Insert(Cloned);
2529
2530 // Add the cloned scalar to the scalar map entry.
2531 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2532
2533 // If we just cloned a new assumption, add it the assumption cache.
2534 if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2535 if (II->getIntrinsicID() == Intrinsic::assume)
2536 AC->registerAssumption(II);
2537
2538 // End if-block.
2539 if (IfPredicateInstr)
2540 PredicatedInstructions.push_back(Cloned);
2541 }
2542
2543 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2544 Value *End, Value *Step,
2545 Instruction *DL) {
2546 BasicBlock *Header = L->getHeader();
2547 BasicBlock *Latch = L->getLoopLatch();
2548 // As we're just creating this loop, it's possible no latch exists
2549 // yet. If so, use the header as this will be a single block loop.
2550 if (!Latch)
2551 Latch = Header;
2552
2553 IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2554 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2555 setDebugLocFromInst(Builder, OldInst);
2556 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2557
2558 Builder.SetInsertPoint(Latch->getTerminator());
2559 setDebugLocFromInst(Builder, OldInst);
2560
2561 // Create i+1 and fill the PHINode.
2562 Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2563 Induction->addIncoming(Start, L->getLoopPreheader());
2564 Induction->addIncoming(Next, Latch);
2565 // Create the compare.
2566 Value *ICmp = Builder.CreateICmpEQ(Next, End);
2567 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2568
2569 // Now we have two terminators. Remove the old one from the block.
2570 Latch->getTerminator()->eraseFromParent();
2571
2572 return Induction;
2573 }
2574
2575 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2576 if (TripCount)
2577 return TripCount;
2578
2579 assert(L && "Create Trip Count for null loop.");
2580 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2581 // Find the loop boundaries.
2582 ScalarEvolution *SE = PSE.getSE();
2583 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2584 assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2585 "Invalid loop count");
2586
2587 Type *IdxTy = Legal->getWidestInductionType();
2588 assert(IdxTy && "No type for induction");
2589
2590 // The exit count might have type i64 while the phi is i32. This can
2591 // happen if we have an induction variable that is sign extended before the
2592 // compare. The only way we can get a backedge-taken count in that case is if
2593 // the induction variable was signed, and as such it will not overflow. In
2594 // such a case truncation is legal.
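// For example, for 'for (i32 i = 0; (i64)i < n; ++i)' the backedge-taken count
// is an i64 expression even though the widest induction type is i32; it is
// safe to truncate it back to i32 here.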
2595 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2596 IdxTy->getPrimitiveSizeInBits())
2597 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2598 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2599
2600 // Get the total trip count from the count by adding 1.
2601 const SCEV *ExitCount = SE->getAddExpr(
2602 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2603
2604 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2605
2606 // Expand the trip count and place the new instructions in the preheader.
2607 // Notice that the pre-header does not change, only the loop body.
2608 SCEVExpander Exp(*SE, DL, "induction");
2609
2610 // Count holds the overall loop count (N).
2611 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2612 L->getLoopPreheader()->getTerminator());
2613
2614 if (TripCount->getType()->isPointerTy())
2615 TripCount =
2616 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2617 L->getLoopPreheader()->getTerminator());
2618
2619 return TripCount;
2620 }
2621
2622 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2623 if (VectorTripCount)
2624 return VectorTripCount;
2625
2626 Value *TC = getOrCreateTripCount(L);
2627 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2628
2629 Type *Ty = TC->getType();
2630 Constant *Step = ConstantInt::get(Ty, VF * UF);
2631
2632 // If the tail is to be folded by masking, round the number of iterations N
2633 // up to a multiple of Step instead of rounding down. This is done by first
2634 // adding Step-1 and then rounding down. Note that it's ok if this addition
2635 // overflows: the vector induction variable will eventually wrap to zero given
2636 // that it starts at zero and its Step is a power of two; the loop will then
2637 // exit, with the last early-exit vector comparison also producing all-true.
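// As a worked example (assuming VF * UF == 8): for a trip count of 13 the
// rounding below yields 13 + 7 == 20, and the remainder computation further
// down gives n.vec == 20 - (20 % 8) == 16, so the vector loop runs two
// iterations and the final three lanes are simply masked off.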
2638 if (Cost->foldTailByMasking()) {
2639 assert(isPowerOf2_32(VF * UF) &&
2640 "VF*UF must be a power of 2 when folding tail by masking");
2641 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
2642 }
2643
2644 // Now we need to generate the expression for the part of the loop that the
2645 // vectorized body will execute. This is equal to N - (N % Step) if scalar
2646 // iterations are not required for correctness, or N - Step, otherwise. Step
2647 // is equal to the vectorization factor (number of SIMD elements) times the
2648 // unroll factor (number of SIMD instructions).
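// For example, without tail folding and with VF == 4, UF == 2 (Step == 8) and
// N == 13: n.mod.vf == 13 % 8 == 5 and n.vec == 8, so one vector iteration
// executes and the remaining 5 iterations run in the scalar epilogue.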
2649 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2650
2651 // If there is a non-reversed interleaved group that may speculatively access
2652 // memory out-of-bounds, we need to ensure that there will be at least one
2653 // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2654 // the trip count, we set the remainder to be equal to the step. If the step
2655 // does not evenly divide the trip count, no adjustment is necessary since
2656 // there will already be scalar iterations. Note that the minimum iterations
2657 // check ensures that N >= Step.
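// For example, if Step == 8 and the trip count is exactly 16, R below would be
// 0; the select resets it to 8 so that n.vec == 8 and the epilogue still
// executes 8 scalar iterations.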
2658 if (VF > 1 && Cost->requiresScalarEpilogue()) {
2659 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2660 R = Builder.CreateSelect(IsZero, Step, R);
2661 }
2662
2663 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2664
2665 return VectorTripCount;
2666 }
2667
2668 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2669 const DataLayout &DL) {
2670 // Verify that V is a vector type with same number of elements as DstVTy.
2671 unsigned VF = DstVTy->getNumElements();
2672 VectorType *SrcVecTy = cast<VectorType>(V->getType());
2673 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2674 Type *SrcElemTy = SrcVecTy->getElementType();
2675 Type *DstElemTy = DstVTy->getElementType();
2676 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2677 "Vector elements must have same size");
2678
2679 // Do a direct cast if element types are castable.
2680 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2681 return Builder.CreateBitOrPointerCast(V, DstVTy);
2682 }
2683 // V cannot be directly casted to desired vector type.
2684 // May happen when V is a floating point vector but DstVTy is a vector of
2685 // pointers or vice-versa. Handle this using a two-step bitcast using an
2686 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
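// For example, assuming 64-bit pointers, a <2 x i8*> source would first be
// converted to <2 x i64> and then bitcast to a <2 x double> destination type.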
2687 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2688 "Only one type should be a pointer type");
2689 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2690 "Only one type should be a floating point type");
2691 Type *IntTy =
2692 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2693 auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2694 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2695 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2696 }
2697
2698 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2699 BasicBlock *Bypass) {
2700 Value *Count = getOrCreateTripCount(L);
2701 // Reuse existing vector loop preheader for TC checks.
2702 // Note that new preheader block is generated for vector loop.
2703 BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2704 IRBuilder<> Builder(TCCheckBlock->getTerminator());
2705
2706 // Generate code to check if the loop's trip count is less than VF * UF, or
2707 // equal to it in case a scalar epilogue is required; this implies that the
2708 // vector trip count is zero. This check also covers the case where adding one
2709 // to the backedge-taken count overflowed leading to an incorrect trip count
2710 // of zero. In this case we will also jump to the scalar loop.
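// For example, with VF == 4 and UF == 2 the bypass below is taken whenever the
// trip count is less than 8 (or less than or equal to 8 if a scalar epilogue
// is required), since the vector loop would otherwise execute zero iterations.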
2711 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2712 : ICmpInst::ICMP_ULT;
2713
2714 // If tail is to be folded, vector loop takes care of all iterations.
2715 Value *CheckMinIters = Builder.getFalse();
2716 if (!Cost->foldTailByMasking())
2717 CheckMinIters = Builder.CreateICmp(
2718 P, Count, ConstantInt::get(Count->getType(), VF * UF),
2719 "min.iters.check");
2720
2721 // Create new preheader for vector loop.
2722 LoopVectorPreHeader =
2723 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2724 "vector.ph");
2725
2726 assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2727 DT->getNode(Bypass)->getIDom()) &&
2728 "TC check is expected to dominate Bypass");
2729
2730 // Update dominator for Bypass & LoopExit.
2731 DT->changeImmediateDominator(Bypass, TCCheckBlock);
2732 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2733
2734 ReplaceInstWithInst(
2735 TCCheckBlock->getTerminator(),
2736 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2737 LoopBypassBlocks.push_back(TCCheckBlock);
2738 }
2739
2740 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2741 // Reuse existing vector loop preheader for SCEV checks.
2742 // Note that new preheader block is generated for vector loop.
2743 BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
2744
2745 // Generate the code to check the SCEV assumptions that we made.
2746 // We want the new basic block to start at the first instruction in a
2747 // sequence of instructions that form a check.
2748 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2749 "scev.check");
2750 Value *SCEVCheck = Exp.expandCodeForPredicate(
2751 &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
2752
2753 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2754 if (C->isZero())
2755 return;
2756
2757 assert(!SCEVCheckBlock->getParent()->hasOptSize() &&
2758 "Cannot SCEV check stride or overflow when optimizing for size");
2759
2760 SCEVCheckBlock->setName("vector.scevcheck");
2761 // Create new preheader for vector loop.
2762 LoopVectorPreHeader =
2763 SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
2764 nullptr, "vector.ph");
2765
2766 // Update dominator only if this is first RT check.
2767 if (LoopBypassBlocks.empty()) {
2768 DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
2769 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
2770 }
2771
2772 ReplaceInstWithInst(
2773 SCEVCheckBlock->getTerminator(),
2774 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
2775 LoopBypassBlocks.push_back(SCEVCheckBlock);
2776 AddedSafetyChecks = true;
2777 }
2778
2779 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2780 // VPlan-native path does not do any analysis for runtime checks currently.
2781 if (EnableVPlanNativePath)
2782 return;
2783
2784 // Reuse existing vector loop preheader for runtime memory checks.
2785 // Note that new preheader block is generated for vector loop.
2786 BasicBlock *const MemCheckBlock = L->getLoopPreheader();
2787
2788 // Generate the code that checks at runtime whether the arrays overlap. We put the
2789 // checks into a separate block to make the more common case of few elements
2790 // faster.
2791 auto *LAI = Legal->getLAI();
2792 const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
2793 if (!RtPtrChecking.Need)
2794 return;
2795 Instruction *FirstCheckInst;
2796 Instruction *MemRuntimeCheck;
2797 std::tie(FirstCheckInst, MemRuntimeCheck) =
2798 addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop,
2799 RtPtrChecking.getChecks(), RtPtrChecking.getSE());
2800 assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
2801 "claimed checks are required");
2802
2803 if (MemCheckBlock->getParent()->hasOptSize()) {
2804 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
2805 "Cannot emit memory checks when optimizing for size, unless forced "
2806 "to vectorize.");
2807 ORE->emit([&]() {
2808 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
2809 L->getStartLoc(), L->getHeader())
2810 << "Code-size may be reduced by not forcing "
2811 "vectorization, or by source-code modifications "
2812 "eliminating the need for runtime checks "
2813 "(e.g., adding 'restrict').";
2814 });
2815 }
2816
2817 MemCheckBlock->setName("vector.memcheck");
2818 // Create new preheader for vector loop.
2819 LoopVectorPreHeader =
2820 SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
2821 "vector.ph");
2822
2823 // Update dominator only if this is first RT check.
2824 if (LoopBypassBlocks.empty()) {
2825 DT->changeImmediateDominator(Bypass, MemCheckBlock);
2826 DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
2827 }
2828
2829 ReplaceInstWithInst(
2830 MemCheckBlock->getTerminator(),
2831 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
2832 LoopBypassBlocks.push_back(MemCheckBlock);
2833 AddedSafetyChecks = true;
2834
2835 // We currently don't use LoopVersioning for the actual loop cloning but we
2836 // still use it to add the noalias metadata.
2837 LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2838 PSE.getSE());
2839 LVer->prepareNoAliasMetadata();
2840 }
2841
2842 Value *InnerLoopVectorizer::emitTransformedIndex(
2843 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
2844 const InductionDescriptor &ID) const {
2845
2846 SCEVExpander Exp(*SE, DL, "induction");
2847 auto Step = ID.getStep();
2848 auto StartValue = ID.getStartValue();
2849 assert(Index->getType() == Step->getType() &&
2850 "Index type does not match StepValue type");
2851
2852 // Note: the IR at this point is broken. We cannot use SE to create any new
2853 // SCEV and then expand it, hoping that SCEV's simplification will give us
2854 // more optimal code. Unfortunately, attempting to do so on invalid IR may
2855 // lead to various SCEV crashes. So all we can do is use the builder and rely
2856 // on InstCombine for future simplifications. Here we handle only some
2857 // trivial cases.
2858 auto CreateAdd = [&B](Value *X, Value *Y) {
2859 assert(X->getType() == Y->getType() && "Types don't match!");
2860 if (auto *CX = dyn_cast<ConstantInt>(X))
2861 if (CX->isZero())
2862 return Y;
2863 if (auto *CY = dyn_cast<ConstantInt>(Y))
2864 if (CY->isZero())
2865 return X;
2866 return B.CreateAdd(X, Y);
2867 };
2868
2869 auto CreateMul = [&B](Value *X, Value *Y) {
2870 assert(X->getType() == Y->getType() && "Types don't match!");
2871 if (auto *CX = dyn_cast<ConstantInt>(X))
2872 if (CX->isOne())
2873 return Y;
2874 if (auto *CY = dyn_cast<ConstantInt>(Y))
2875 if (CY->isOne())
2876 return X;
2877 return B.CreateMul(X, Y);
2878 };
2879
2880 // Get a suitable insert point for SCEV expansion. For blocks in the vector
2881 // loop, choose the end of the vector loop header (=LoopVectorBody), because
2882 // the DomTree is not kept up-to-date for additional blocks generated in the
2883 // vector loop. By using the header as insertion point, we guarantee that the
2884 // expanded instructions dominate all their uses.
2885 auto GetInsertPoint = [this, &B]() {
2886 BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
2887 if (InsertBB != LoopVectorBody &&
2888 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
2889 return LoopVectorBody->getTerminator();
2890 return &*B.GetInsertPoint();
2891 };
2892 switch (ID.getKind()) {
2893 case InductionDescriptor::IK_IntInduction: {
2894 assert(Index->getType() == StartValue->getType() &&
2895 "Index type does not match StartValue type");
2896 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
2897 return B.CreateSub(StartValue, Index);
2898 auto *Offset = CreateMul(
2899 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
2900 return CreateAdd(StartValue, Offset);
2901 }
2902 case InductionDescriptor::IK_PtrInduction: {
2903 assert(isa<SCEVConstant>(Step) &&
2904 "Expected constant step for pointer induction");
2905 return B.CreateGEP(
2906 StartValue->getType()->getPointerElementType(), StartValue,
2907 CreateMul(Index,
2908 Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
2909 }
2910 case InductionDescriptor::IK_FpInduction: {
2911 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2912 auto InductionBinOp = ID.getInductionBinOp();
2913 assert(InductionBinOp &&
2914 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2915 InductionBinOp->getOpcode() == Instruction::FSub) &&
2916 "Original bin op should be defined for FP induction");
2917
2918 Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
2919
2920 // Floating point operations had to be 'fast' to enable the induction.
2921 FastMathFlags Flags;
2922 Flags.setFast();
2923
2924 Value *MulExp = B.CreateFMul(StepValue, Index);
2925 if (isa<Instruction>(MulExp))
2926 // We have to check because MulExp may be a constant.
2927 cast<Instruction>(MulExp)->setFastMathFlags(Flags);
2928
2929 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2930 "induction");
2931 if (isa<Instruction>(BOp))
2932 cast<Instruction>(BOp)->setFastMathFlags(Flags);
2933
2934 return BOp;
2935 }
2936 case InductionDescriptor::IK_NoInduction:
2937 return nullptr;
2938 }
2939 llvm_unreachable("invalid enum");
2940 }
2941
2942 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2943 /*
2944 In this function we generate a new loop. The new loop will contain
2945 the vectorized instructions while the old loop will continue to run the
2946 scalar remainder.
2947
2948 [ ] <-- loop iteration number check.
2949 / |
2950 / v
2951 | [ ] <-- vector loop bypass (may consist of multiple blocks).
2952 | / |
2953 | / v
2954 || [ ] <-- vector pre header.
2955 |/ |
2956 | v
2957 | [ ] \
2958 | [ ]_| <-- vector loop.
2959 | |
2960 | v
2961 | -[ ] <--- middle-block.
2962 | / |
2963 | / v
2964 -|- >[ ] <--- new preheader.
2965 | |
2966 | v
2967 | [ ] \
2968 | [ ]_| <-- old scalar loop to handle remainder.
2969 \ |
2970 \ v
2971 >[ ] <-- exit block.
2972 ...
2973 */
2974
2975 MDNode *OrigLoopID = OrigLoop->getLoopID();
2976
2977 // Some loops have a single integer induction variable, while other loops
2978 // don't. One example is C++ iterators, which often have multiple pointer
2979 // induction variables. In the code below we also support a case where we
2980 // don't have a single induction variable.
2981 //
2982 // We try to obtain an induction variable from the original loop as hard
2983 // as possible. However if we don't find one that:
2984 // - is an integer
2985 // - counts from zero, stepping by one
2986 // - is the size of the widest induction variable type
2987 // then we create a new one.
2988 OldInduction = Legal->getPrimaryInduction();
2989 Type *IdxTy = Legal->getWidestInductionType();
2990
2991 // Split the single block loop into the two loop structure described above.
2992 LoopScalarBody = OrigLoop->getHeader();
2993 LoopVectorPreHeader = OrigLoop->getLoopPreheader();
2994 LoopExitBlock = OrigLoop->getExitBlock();
2995 assert(LoopExitBlock && "Must have an exit block");
2996 assert(LoopVectorPreHeader && "Invalid loop structure");
2997
2998 LoopMiddleBlock =
2999 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3000 LI, nullptr, "middle.block");
3001 LoopScalarPreHeader =
3002 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3003 nullptr, "scalar.ph");
3004 // We intentionally don't let SplitBlock update LoopInfo, since
3005 // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
3006 // LoopVectorBody is explicitly added to the correct place a few lines later.
3007 LoopVectorBody =
3008 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3009 nullptr, nullptr, "vector.body");
3010
3011 // Update dominator for loop exit.
3012 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3013
3014 // Create and register the new vector loop.
3015 Loop *Lp = LI->AllocateLoop();
3016 Loop *ParentLoop = OrigLoop->getParentLoop();
3017
3018 // Insert the new loop into the loop nest and register the new basic blocks
3019 // before calling any utilities such as SCEV that require valid LoopInfo.
3020 if (ParentLoop) {
3021 ParentLoop->addChildLoop(Lp);
3022 } else {
3023 LI->addTopLevelLoop(Lp);
3024 }
3025 Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3026
3027 // Find the loop boundaries.
3028 Value *Count = getOrCreateTripCount(Lp);
3029
3030 Value *StartIdx = ConstantInt::get(IdxTy, 0);
3031
3032 // Now, compare the new count to zero. If it is zero skip the vector loop and
3033 // jump to the scalar loop. This check also covers the case where the
3034 // backedge-taken count is uint##_max: adding one to it will overflow leading
3035 // to an incorrect trip count of zero. In this (rare) case we will also jump
3036 // to the scalar loop.
3037 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3038
3039 // Generate the code to check any assumptions that we've made for SCEV
3040 // expressions.
3041 emitSCEVChecks(Lp, LoopScalarPreHeader);
3042
3043 // Generate the code that checks at runtime whether the arrays overlap. We put the
3044 // checks into a separate block to make the more common case of few elements
3045 // faster.
3046 emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3047
3048 // Generate the induction variable.
3049 // The loop step is equal to the vectorization factor (num of SIMD elements)
3050 // times the unroll factor (num of SIMD instructions).
3051 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3052 Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3053 Induction =
3054 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3055 getDebugLocFromInstOrOperands(OldInduction));
3056
3057 // We are going to resume the execution of the scalar loop.
3058 // Go over all of the induction variables that we found and fix the
3059 // PHIs that are left in the scalar version of the loop.
3060 // The starting values of PHI nodes depend on the counter of the last
3061 // iteration in the vectorized loop.
3062 // If we come from a bypass edge then we need to start from the original
3063 // start value.
3064
3065 // This variable saves the new starting index for the scalar loop. It is used
3066 // to test if there are any tail iterations left once the vector loop has
3067 // completed.
3068 for (auto &InductionEntry : Legal->getInductionVars()) {
3069 PHINode *OrigPhi = InductionEntry.first;
3070 InductionDescriptor II = InductionEntry.second;
3071
3072 // Create phi nodes to merge from the backedge-taken check block.
3073 PHINode *BCResumeVal =
3074 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3075 LoopScalarPreHeader->getTerminator());
3076 // Copy original phi DL over to the new one.
3077 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3078 Value *&EndValue = IVEndValues[OrigPhi];
3079 if (OrigPhi == OldInduction) {
3080 // We know what the end value is.
3081 EndValue = CountRoundDown;
3082 } else {
3083 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3084 Type *StepType = II.getStep()->getType();
3085 Instruction::CastOps CastOp =
3086 CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3087 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3088 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3089 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3090 EndValue->setName("ind.end");
3091 }
3092
3093 // The new PHI merges the original incoming value, in case of a bypass,
3094 // or the value at the end of the vectorized loop.
3095 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3096
3097 // Fix the scalar body counter (PHI node).
3098 // The old induction's phi node in the scalar body needs the truncated
3099 // value.
3100 for (BasicBlock *BB : LoopBypassBlocks)
3101 BCResumeVal->addIncoming(II.getStartValue(), BB);
3102 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3103 }
3104
3105 // We need the OrigLoop (scalar loop part) latch terminator to help
3106 // produce correct debug info for the middle block BB instructions.
3107 // The legality check stage guarantees that the loop will have a single
3108 // latch.
3109 assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
3110 "Scalar loop latch terminator isn't a branch");
3111 BranchInst *ScalarLatchBr =
3112 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
3113
3114 // Add a check in the middle block to see if we have completed
3115 // all of the iterations in the first vector loop.
3116 // If (N - N%VF) == N, then we *don't* need to run the remainder.
3117 // If tail is to be folded, we know we don't need to run the remainder.
3118 Value *CmpN = Builder.getTrue();
3119 if (!Cost->foldTailByMasking()) {
3120 CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3121 CountRoundDown, "cmp.n",
3122 LoopMiddleBlock->getTerminator());
3123
3124 // Here we use the same DebugLoc as the scalar loop latch branch instead
3125 // of the corresponding compare because they may have ended up with
3126 // different line numbers and we want to avoid awkward line stepping while
3127 // debugging, e.g., if the compare has a line number inside the loop.
3128 cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
3129 }
3130
3131 BranchInst *BrInst =
3132 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
3133 BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
3134 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3135
3136 // Get ready to start creating new instructions into the vectorized body.
3137 assert(LoopVectorPreHeader == Lp->getLoopPreheader() &&
3138 "Inconsistent vector loop preheader");
3139 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3140
3141 Optional<MDNode *> VectorizedLoopID =
3142 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3143 LLVMLoopVectorizeFollowupVectorized});
3144 if (VectorizedLoopID.hasValue()) {
3145 Lp->setLoopID(VectorizedLoopID.getValue());
3146
3147 // Do not setAlreadyVectorized if loop attributes have been defined
3148 // explicitly.
3149 return LoopVectorPreHeader;
3150 }
3151
3152 // Keep all loop hints from the original loop on the vector loop (we'll
3153 // replace the vectorizer-specific hints below).
3154 if (MDNode *LID = OrigLoop->getLoopID())
3155 Lp->setLoopID(LID);
3156
3157 LoopVectorizeHints Hints(Lp, true, *ORE);
3158 Hints.setAlreadyVectorized();
3159
3160 #ifdef EXPENSIVE_CHECKS
3161 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3162 LI->verify(*DT);
3163 #endif
3164
3165 return LoopVectorPreHeader;
3166 }
3167
3168 // Fix up external users of the induction variable. At this point, we are
3169 // in LCSSA form, with all external PHIs that use the IV having one input value,
3170 // coming from the remainder loop. We need those PHIs to also have a correct
3171 // value for the IV when arriving directly from the middle block.
3172 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3173 const InductionDescriptor &II,
3174 Value *CountRoundDown, Value *EndValue,
3175 BasicBlock *MiddleBlock) {
3176 // There are two kinds of external IV usages - those that use the value
3177 // computed in the last iteration (the PHI) and those that use the penultimate
3178 // value (the value that feeds into the phi from the loop latch).
3179 // We allow both, but they, obviously, have different values.
3180
3181 assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3182
3183 DenseMap<Value *, Value *> MissingVals;
3184
3185 // An external user of the last iteration's value should see the value that
3186 // the remainder loop uses to initialize its own IV.
3187 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3188 for (User *U : PostInc->users()) {
3189 Instruction *UI = cast<Instruction>(U);
3190 if (!OrigLoop->contains(UI)) {
3191 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3192 MissingVals[UI] = EndValue;
3193 }
3194 }
3195
3196 // An external user of the penultimate value needs to see EndValue - Step.
3197 // The simplest way to get this is to recompute it from the constituent SCEVs,
3198 // that is Start + (Step * (CRD - 1)).
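// For example, for a canonical IV starting at 0 with step 1 and
// CountRoundDown == 8, such a user must see 0 + 1 * (8 - 1) == 7.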
3199 for (User *U : OrigPhi->users()) {
3200 auto *UI = cast<Instruction>(U);
3201 if (!OrigLoop->contains(UI)) {
3202 const DataLayout &DL =
3203 OrigLoop->getHeader()->getModule()->getDataLayout();
3204 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3205
3206 IRBuilder<> B(MiddleBlock->getTerminator());
3207 Value *CountMinusOne = B.CreateSub(
3208 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3209 Value *CMO =
3210 !II.getStep()->getType()->isIntegerTy()
3211 ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3212 II.getStep()->getType())
3213 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3214 CMO->setName("cast.cmo");
3215 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3216 Escape->setName("ind.escape");
3217 MissingVals[UI] = Escape;
3218 }
3219 }
3220
3221 for (auto &I : MissingVals) {
3222 PHINode *PHI = cast<PHINode>(I.first);
3223 // One corner case we have to handle is two IVs "chasing" each other,
3224 // that is %IV2 = phi [...], [ %IV1, %latch ]
3225 // In this case, if IV1 has an external use, we need to avoid adding both
3226 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3227 // don't already have an incoming value for the middle block.
3228 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3229 PHI->addIncoming(I.second, MiddleBlock);
3230 }
3231 }
3232
3233 namespace {
3234
3235 struct CSEDenseMapInfo {
3236 static bool canHandle(const Instruction *I) {
3237 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3238 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3239 }
3240
3241 static inline Instruction *getEmptyKey() {
3242 return DenseMapInfo<Instruction *>::getEmptyKey();
3243 }
3244
3245 static inline Instruction *getTombstoneKey() {
3246 return DenseMapInfo<Instruction *>::getTombstoneKey();
3247 }
3248
3249 static unsigned getHashValue(const Instruction *I) {
3250 assert(canHandle(I) && "Unknown instruction!");
3251 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3252 I->value_op_end()));
3253 }
3254
3255 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3256 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3257 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3258 return LHS == RHS;
3259 return LHS->isIdenticalTo(RHS);
3260 }
3261 };
3262
3263 } // end anonymous namespace
3264
3265 /// Perform CSE of induction variable instructions.
3266 static void cse(BasicBlock *BB) {
3267 // Perform simple cse.
3268 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3269 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3270 Instruction *In = &*I++;
3271
3272 if (!CSEDenseMapInfo::canHandle(In))
3273 continue;
3274
3275 // Check if we can replace this instruction with any of the
3276 // visited instructions.
3277 if (Instruction *V = CSEMap.lookup(In)) {
3278 In->replaceAllUsesWith(V);
3279 In->eraseFromParent();
3280 continue;
3281 }
3282
3283 CSEMap[In] = In;
3284 }
3285 }
3286
3287 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
3288 unsigned VF,
3289 bool &NeedToScalarize) {
3290 Function *F = CI->getCalledFunction();
3291 Type *ScalarRetTy = CI->getType();
3292 SmallVector<Type *, 4> Tys, ScalarTys;
3293 for (auto &ArgOp : CI->arg_operands())
3294 ScalarTys.push_back(ArgOp->getType());
3295
3296 // Estimate cost of scalarized vector call. The source operands are assumed
3297 // to be vectors, so we need to extract individual elements from there,
3298 // execute VF scalar calls, and then gather the result into the vector return
3299 // value.
3300 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys,
3301 TTI::TCK_RecipThroughput);
3302 if (VF == 1)
3303 return ScalarCallCost;
3304
3305 // Compute corresponding vector type for return value and arguments.
3306 Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3307 for (Type *ScalarTy : ScalarTys)
3308 Tys.push_back(ToVectorTy(ScalarTy, VF));
3309
3310 // Compute costs of unpacking argument values for the scalar calls and
3311 // packing the return values to a vector.
3312 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);
3313
3314 unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
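// For illustration (with hypothetical costs): given VF == 4, a scalar call
// cost of 10 and a scalarization overhead of 6, the scalarized cost is
// 4 * 10 + 6 == 46; it is returned below unless a cheaper vector variant of
// the call is available.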
3315
3316 // If we can't emit a vector call for this function, then the currently found
3317 // cost is the cost we need to return.
3318 NeedToScalarize = true;
3319 VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/);
3320 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3321
3322 if (!TLI || CI->isNoBuiltin() || !VecFunc)
3323 return Cost;
3324
3325 // If the corresponding vector cost is cheaper, return its cost.
3326 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys,
3327 TTI::TCK_RecipThroughput);
3328 if (VectorCallCost < Cost) {
3329 NeedToScalarize = false;
3330 return VectorCallCost;
3331 }
3332 return Cost;
3333 }
3334
3335 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3336 unsigned VF) {
3337 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3338 assert(ID && "Expected intrinsic call!");
3339
3340 IntrinsicCostAttributes CostAttrs(ID, *CI, VF);
3341 return TTI.getIntrinsicInstrCost(CostAttrs,
3342 TargetTransformInfo::TCK_RecipThroughput);
3343 }
3344
3345 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3346 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3347 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3348 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3349 }
3350
3351 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3352 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3353 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3354 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3355 }
3356
3357 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3358 // For every instruction `I` in MinBWs, truncate the operands, create a
3359 // truncated version of `I` and reextend its result. InstCombine runs
3360 // later and will remove any ext/trunc pairs.
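// For example, if an i32 operation is known to need only 8 bits, its operands
// are truncated to <VF x i8>, the operation is re-created on the narrow type,
// and the result is zero-extended back to <VF x i32>.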
3361 SmallPtrSet<Value *, 4> Erased;
3362 for (const auto &KV : Cost->getMinimalBitwidths()) {
3363 // If the value wasn't vectorized, we must maintain the original scalar
3364 // type. The absence of the value from VectorLoopValueMap indicates that it
3365 // wasn't vectorized.
3366 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3367 continue;
3368 for (unsigned Part = 0; Part < UF; ++Part) {
3369 Value *I = getOrCreateVectorValue(KV.first, Part);
3370 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3371 continue;
3372 Type *OriginalTy = I->getType();
3373 Type *ScalarTruncatedTy =
3374 IntegerType::get(OriginalTy->getContext(), KV.second);
3375 auto *TruncatedTy = FixedVectorType::get(
3376 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements());
3377 if (TruncatedTy == OriginalTy)
3378 continue;
3379
3380 IRBuilder<> B(cast<Instruction>(I));
3381 auto ShrinkOperand = [&](Value *V) -> Value * {
3382 if (auto *ZI = dyn_cast<ZExtInst>(V))
3383 if (ZI->getSrcTy() == TruncatedTy)
3384 return ZI->getOperand(0);
3385 return B.CreateZExtOrTrunc(V, TruncatedTy);
3386 };
3387
3388 // The actual instruction modification depends on the instruction type,
3389 // unfortunately.
3390 Value *NewI = nullptr;
3391 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3392 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3393 ShrinkOperand(BO->getOperand(1)));
3394
3395 // Any wrapping introduced by shrinking this operation shouldn't be
3396 // considered undefined behavior. So, we can't unconditionally copy
3397 // arithmetic wrapping flags to NewI.
3398 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3399 } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3400 NewI =
3401 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3402 ShrinkOperand(CI->getOperand(1)));
3403 } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3404 NewI = B.CreateSelect(SI->getCondition(),
3405 ShrinkOperand(SI->getTrueValue()),
3406 ShrinkOperand(SI->getFalseValue()));
3407 } else if (auto *CI = dyn_cast<CastInst>(I)) {
3408 switch (CI->getOpcode()) {
3409 default:
3410 llvm_unreachable("Unhandled cast!");
3411 case Instruction::Trunc:
3412 NewI = ShrinkOperand(CI->getOperand(0));
3413 break;
3414 case Instruction::SExt:
3415 NewI = B.CreateSExtOrTrunc(
3416 CI->getOperand(0),
3417 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3418 break;
3419 case Instruction::ZExt:
3420 NewI = B.CreateZExtOrTrunc(
3421 CI->getOperand(0),
3422 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3423 break;
3424 }
3425 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3426 auto Elements0 =
3427 cast<VectorType>(SI->getOperand(0)->getType())->getNumElements();
3428 auto *O0 = B.CreateZExtOrTrunc(
3429 SI->getOperand(0),
3430 FixedVectorType::get(ScalarTruncatedTy, Elements0));
3431 auto Elements1 =
3432 cast<VectorType>(SI->getOperand(1)->getType())->getNumElements();
3433 auto *O1 = B.CreateZExtOrTrunc(
3434 SI->getOperand(1),
3435 FixedVectorType::get(ScalarTruncatedTy, Elements1));
3436
3437 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3438 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3439 // Don't do anything with the operands, just extend the result.
3440 continue;
3441 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3442 auto Elements =
3443 cast<VectorType>(IE->getOperand(0)->getType())->getNumElements();
3444 auto *O0 = B.CreateZExtOrTrunc(
3445 IE->getOperand(0),
3446 FixedVectorType::get(ScalarTruncatedTy, Elements));
3447 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3448 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3449 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3450 auto Elements =
3451 cast<VectorType>(EE->getOperand(0)->getType())->getNumElements();
3452 auto *O0 = B.CreateZExtOrTrunc(
3453 EE->getOperand(0),
3454 FixedVectorType::get(ScalarTruncatedTy, Elements));
3455 NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3456 } else {
3457 // If we don't know what to do, be conservative and don't do anything.
3458 continue;
3459 }
3460
3461 // Lastly, extend the result.
3462 NewI->takeName(cast<Instruction>(I));
3463 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3464 I->replaceAllUsesWith(Res);
3465 cast<Instruction>(I)->eraseFromParent();
3466 Erased.insert(I);
3467 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3468 }
3469 }
3470
3471 // We'll have created a bunch of ZExts that are now parentless. Clean up.
3472 for (const auto &KV : Cost->getMinimalBitwidths()) {
3473 // If the value wasn't vectorized, we must maintain the original scalar
3474 // type. The absence of the value from VectorLoopValueMap indicates that it
3475 // wasn't vectorized.
3476 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3477 continue;
3478 for (unsigned Part = 0; Part < UF; ++Part) {
3479 Value *I = getOrCreateVectorValue(KV.first, Part);
3480 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3481 if (Inst && Inst->use_empty()) {
3482 Value *NewI = Inst->getOperand(0);
3483 Inst->eraseFromParent();
3484 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3485 }
3486 }
3487 }
3488 }
3489
3490 void InnerLoopVectorizer::fixVectorizedLoop() {
3491 // Insert truncates and extends for any truncated instructions as hints to
3492 // InstCombine.
3493 if (VF > 1)
3494 truncateToMinimalBitwidths();
3495
3496 // Fix widened non-induction PHIs by setting up the PHI operands.
3497 if (OrigPHIsToFix.size()) {
3498 assert(EnableVPlanNativePath &&
3499 "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3500 fixNonInductionPHIs();
3501 }
3502
3503 // At this point every instruction in the original loop is widened to a
3504 // vector form. Now we need to fix the recurrences in the loop. These PHI
3505 // nodes are currently empty because we did not want to introduce cycles.
3506 // This is the second stage of vectorizing recurrences.
3507 fixCrossIterationPHIs();
3508
3509 // Forget the original basic block.
3510 PSE.getSE()->forgetLoop(OrigLoop);
3511
3512 // Fix-up external users of the induction variables.
3513 for (auto &Entry : Legal->getInductionVars())
3514 fixupIVUsers(Entry.first, Entry.second,
3515 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3516 IVEndValues[Entry.first], LoopMiddleBlock);
3517
3518 fixLCSSAPHIs();
3519 for (Instruction *PI : PredicatedInstructions)
3520 sinkScalarOperands(&*PI);
3521
3522 // Remove redundant induction instructions.
3523 cse(LoopVectorBody);
3524
3525 // Set/update profile weights for the vector and remainder loops as original
3526 // loop iterations are now distributed among them. Note that original loop
3527 // represented by LoopScalarBody becomes remainder loop after vectorization.
3528 //
3529 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
3530 // end up getting a slightly roughened result, but that should be OK since
3531 // the profile is not inherently precise anyway. Note also that a possible bypass of
3532 // vector code caused by legality checks is ignored, assigning all the weight
3533 // to the vector loop, optimistically.
3534 setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3535 LI->getLoopFor(LoopVectorBody),
3536 LI->getLoopFor(LoopScalarBody), VF * UF);
3537 }
3538
3539 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3540 // In order to support recurrences we need to be able to vectorize Phi nodes.
3541 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3542 // stage #2: We now need to fix the recurrences by adding incoming edges to
3543 // the currently empty PHI nodes. At this point every instruction in the
3544 // original loop is widened to a vector form so we can use them to construct
3545 // the incoming edges.
3546 for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3547 // Handle first-order recurrences and reductions that need to be fixed.
3548 if (Legal->isFirstOrderRecurrence(&Phi))
3549 fixFirstOrderRecurrence(&Phi);
3550 else if (Legal->isReductionVariable(&Phi))
3551 fixReduction(&Phi);
3552 }
3553 }
3554
3555 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3556 // This is the second phase of vectorizing first-order recurrences. An
3557 // overview of the transformation is described below. Suppose we have the
3558 // following loop.
3559 //
3560 // for (int i = 0; i < n; ++i)
3561 // b[i] = a[i] - a[i - 1];
3562 //
3563 // There is a first-order recurrence on "a". For this loop, the shorthand
3564 // scalar IR looks like:
3565 //
3566 // scalar.ph:
3567 // s_init = a[-1]
3568 // br scalar.body
3569 //
3570 // scalar.body:
3571 // i = phi [0, scalar.ph], [i+1, scalar.body]
3572 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3573 // s2 = a[i]
3574 // b[i] = s2 - s1
3575 // br cond, scalar.body, ...
3576 //
3577 // In this example, s1 is a recurrence because its value depends on the
3578 // previous iteration. In the first phase of vectorization, we created a
3579 // temporary value for s1. We now complete the vectorization and produce the
3580 // shorthand vector IR shown below (for VF = 4, UF = 1).
3581 //
3582 // vector.ph:
3583 // v_init = vector(..., ..., ..., a[-1])
3584 // br vector.body
3585 //
3586 // vector.body
3587 // i = phi [0, vector.ph], [i+4, vector.body]
3588 // v1 = phi [v_init, vector.ph], [v2, vector.body]
3589 // v2 = a[i, i+1, i+2, i+3];
3590 // v3 = vector(v1(3), v2(0, 1, 2))
3591 // b[i, i+1, i+2, i+3] = v2 - v3
3592 // br cond, vector.body, middle.block
3593 //
3594 // middle.block:
3595 // x = v2(3)
3596 // br scalar.ph
3597 //
3598 // scalar.ph:
3599 // s_init = phi [x, middle.block], [a[-1], otherwise]
3600 // br scalar.body
3601 //
3602 // After execution of the vector loop completes, we extract the next value of
3603 // the recurrence (x) to use as the initial value in the scalar loop.
3604
3605 // Get the original loop preheader and single loop latch.
3606 auto *Preheader = OrigLoop->getLoopPreheader();
3607 auto *Latch = OrigLoop->getLoopLatch();
3608
3609 // Get the initial and previous values of the scalar recurrence.
3610 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3611 auto *Previous = Phi->getIncomingValueForBlock(Latch);
3612
3613 // Create a vector from the initial value.
3614 auto *VectorInit = ScalarInit;
3615 if (VF > 1) {
3616 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3617 VectorInit = Builder.CreateInsertElement(
3618 UndefValue::get(FixedVectorType::get(VectorInit->getType(), VF)),
3619 VectorInit, Builder.getInt32(VF - 1), "vector.recur.init");
3620 }
3621
3622 // We constructed a temporary phi node in the first phase of vectorization.
3623 // This phi node will eventually be deleted.
3624 Builder.SetInsertPoint(
3625 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3626
3627 // Create a phi node for the new recurrence. The current value will either be
3628 // the initial value inserted into a vector or loop-varying vector value.
3629 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3630 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3631
3632 // Get the vectorized previous value of the last part UF - 1. It appears last
3633 // among all unrolled iterations, due to the order of their construction.
3634 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3635
3636 // Find and set the insertion point after the previous value if it is an
3637 // instruction.
3638 BasicBlock::iterator InsertPt;
3639 // Note that the previous value may have been constant-folded so it is not
3640 // guaranteed to be an instruction in the vector loop.
3641 // FIXME: Loop invariant values do not form recurrences. We should deal with
3642 // them earlier.
3643 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3644 InsertPt = LoopVectorBody->getFirstInsertionPt();
3645 else {
3646 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3647 if (isa<PHINode>(PreviousLastPart))
3648 // If the previous value is a phi node, we should insert after all the phi
3649 // nodes in the block containing the PHI to avoid breaking basic block
3650 // verification. Note that the basic block may be different to
3651 // LoopVectorBody, in case we predicate the loop.
3652 InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3653 else
3654 InsertPt = ++PreviousInst->getIterator();
3655 }
3656 Builder.SetInsertPoint(&*InsertPt);
3657
3658 // We will construct a vector for the recurrence by combining the values for
3659 // the current and previous iterations. This is the required shuffle mask.
3660 SmallVector<int, 8> ShuffleMask(VF);
3661 ShuffleMask[0] = VF - 1;
3662 for (unsigned I = 1; I < VF; ++I)
3663 ShuffleMask[I] = I + VF - 1;
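// For VF == 4 this mask is <3, 4, 5, 6>: lane 3 of the previous iteration's
// vector followed by lanes 0..2 of the current part, matching v3 in the
// example above.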
3664
3665 // The vector from which to take the initial value for the current iteration
3666 // (actual or unrolled). Initially, this is the vector phi node.
3667 Value *Incoming = VecPhi;
3668
3669 // Shuffle the current and previous vector and update the vector parts.
3670 for (unsigned Part = 0; Part < UF; ++Part) {
3671 Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3672 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3673 auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3674 ShuffleMask)
3675 : Incoming;
3676 PhiPart->replaceAllUsesWith(Shuffle);
3677 cast<Instruction>(PhiPart)->eraseFromParent();
3678 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3679 Incoming = PreviousPart;
3680 }
3681
3682 // Fix the latch value of the new recurrence in the vector loop.
3683 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3684
3685 // Extract the last vector element in the middle block. This will be the
3686 // initial value for the recurrence when jumping to the scalar loop.
3687 auto *ExtractForScalar = Incoming;
3688 if (VF > 1) {
3689 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3690 ExtractForScalar = Builder.CreateExtractElement(
3691 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3692 }
3693 // Extract the second last element in the middle block if the
3694 // Phi is used outside the loop. We need to extract the phi itself
3695 // and not the last element (the phi update in the current iteration). This
3696 // will be the value when jumping to the exit block from the LoopMiddleBlock,
3697 // when the scalar loop is not run at all.
3698 Value *ExtractForPhiUsedOutsideLoop = nullptr;
3699 if (VF > 1)
3700 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3701 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3702 // When loop is unrolled without vectorizing, initialize
3703 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of
3704 // `Incoming`. This is analogous to the vectorized case above: extracting the
3705 // second last element when VF > 1.
3706 else if (UF > 1)
3707 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3708
3709 // Fix the initial value of the original recurrence in the scalar loop.
3710 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3711 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3712 for (auto *BB : predecessors(LoopScalarPreHeader)) {
3713 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3714 Start->addIncoming(Incoming, BB);
3715 }
3716
3717 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3718 Phi->setName("scalar.recur");
3719
3720 // Finally, fix users of the recurrence outside the loop. The users will need
3721 // either the last value of the scalar recurrence or the last value of the
3722 // vector recurrence we extracted in the middle block. Since the loop is in
3723 // LCSSA form, we just need to find all the phi nodes for the original scalar
3724 // recurrence in the exit block, and then add an edge for the middle block.
3725 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3726 if (LCSSAPhi.getIncomingValue(0) == Phi) {
3727 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3728 }
3729 }
3730 }
3731
3732 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3733 Constant *Zero = Builder.getInt32(0);
3734
3735 // Get its reduction variable descriptor.
3736 assert(Legal->isReductionVariable(Phi) &&
3737 "Unable to find the reduction variable");
3738 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3739
3740 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3741 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3742 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3743 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3744 RdxDesc.getMinMaxRecurrenceKind();
3745 setDebugLocFromInst(Builder, ReductionStartValue);
3746
3747 // We need to generate a reduction vector from the incoming scalar.
3748 // To do so, we need to generate the 'identity' vector and override
3749 // one of the elements with the incoming scalar reduction. We need
3750 // to do it in the vector-loop preheader.
3751 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3752
3753 // This is the vector-clone of the value that leaves the loop.
3754 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3755
3756 // Find the reduction identity variable. Zero for addition, or and xor;
3757 // one for multiplication; -1 for and.
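// For example, for an integer add reduction with scalar start value %s and
// VF == 4, Identity is the zero vector and VectorStart becomes
// <%s, 0, 0, 0>.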
3758 Value *Identity;
3759 Value *VectorStart;
3760 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3761 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3762 // MinMax reductions have the start value as their identity.
3763 if (VF == 1) {
3764 VectorStart = Identity = ReductionStartValue;
3765 } else {
3766 VectorStart = Identity =
3767 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3768 }
3769 } else {
3770 // Handle other reduction kinds:
3771 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3772 RK, VecTy->getScalarType());
3773 if (VF == 1) {
3774 Identity = Iden;
3775 // This vector is the Identity vector where the first element is the
3776 // incoming scalar reduction.
3777 VectorStart = ReductionStartValue;
3778 } else {
3779 Identity = ConstantVector::getSplat({VF, false}, Iden);
3780
3781 // This vector is the Identity vector where the first element is the
3782 // incoming scalar reduction.
3783 VectorStart =
3784 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3785 }
3786 }
3787
3788 // Wrap flags are in general invalid after vectorization, clear them.
3789 clearReductionWrapFlags(RdxDesc);
3790
3791 // Fix the vector-loop phi.
3792
3793 // Reductions do not have to start at zero. They can start with
3794 // any loop invariant values.
3795 BasicBlock *Latch = OrigLoop->getLoopLatch();
3796 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3797
3798 for (unsigned Part = 0; Part < UF; ++Part) {
3799 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3800 Value *Val = getOrCreateVectorValue(LoopVal, Part);
3801 // Make sure to add the reduction start value only to the
3802 // first unroll part.
3803 Value *StartVal = (Part == 0) ? VectorStart : Identity;
3804 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3805 cast<PHINode>(VecRdxPhi)
3806 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3807 }
3808
3809 // Before each round, move the insertion point right between
3810 // the PHIs and the values we are going to write.
3811 // This allows us to write both PHINodes and the extractelement
3812 // instructions.
3813 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3814
3815 setDebugLocFromInst(Builder, LoopExitInst);
3816
3817 // If tail is folded by masking, the vector value to leave the loop should be
3818 // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3819 // instead of the former.
3820 if (Cost->foldTailByMasking()) {
3821 for (unsigned Part = 0; Part < UF; ++Part) {
3822 Value *VecLoopExitInst =
3823 VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3824 Value *Sel = nullptr;
3825 for (User *U : VecLoopExitInst->users()) {
3826 if (isa<SelectInst>(U)) {
3827 assert(!Sel && "Reduction exit feeding two selects");
3828 Sel = U;
3829 } else
3830 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3831 }
3832 assert(Sel && "Reduction exit feeds no select");
3833 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
3834 }
3835 }
3836
3837 // If the vector reduction can be performed in a smaller type, we truncate
3838 // then extend the loop exit value to enable InstCombine to evaluate the
3839 // entire expression in the smaller type.
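  // A rough sketch (assuming the recurrence was computed in i8 but the phi is
  // i32, with VF = 4): in the vector latch we emit
  //   %trunc = trunc <4 x i32> %exit to <4 x i8>
  //   %extnd = zext <4 x i8> %trunc to <4 x i32>   ; or sext if signed
  // and rewrite the users, so InstCombine can later shrink the whole chain.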
3840 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3841 Type *RdxVecTy = FixedVectorType::get(RdxDesc.getRecurrenceType(), VF);
3842 Builder.SetInsertPoint(
3843 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3844 VectorParts RdxParts(UF);
3845 for (unsigned Part = 0; Part < UF; ++Part) {
3846 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3847 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3848 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3849 : Builder.CreateZExt(Trunc, VecTy);
3850 for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3851 UI != RdxParts[Part]->user_end();)
3852 if (*UI != Trunc) {
3853 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3854 RdxParts[Part] = Extnd;
3855 } else {
3856 ++UI;
3857 }
3858 }
3859 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3860 for (unsigned Part = 0; Part < UF; ++Part) {
3861 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3862 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3863 }
3864 }
3865
3866 // Reduce all of the unrolled parts into a single vector.
3867 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3868 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3869
3870 // The middle block terminator has already been assigned a DebugLoc here (the
3871 // OrigLoop's single latch terminator). We want the whole middle block to
3872 // appear to execute on this line because: (a) it is all compiler generated,
3873 // (b) these instructions are always executed after evaluating the latch
3874 // conditional branch, and (c) other passes may add new predecessors which
3875 // terminate on this line. This is the easiest way to ensure we don't
3876 // accidentally cause an extra step back into the loop while debugging.
3877 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
3878 for (unsigned Part = 1; Part < UF; ++Part) {
3879 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3880 if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3881 // Floating point operations had to be 'fast' to enable the reduction.
3882 ReducedPartRdx = addFastMathFlag(
3883 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3884 ReducedPartRdx, "bin.rdx"),
3885 RdxDesc.getFastMathFlags());
3886 else
3887 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
3888 RdxPart);
3889 }
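  // At this point ReducedPartRdx combines all UF parts into a single vector,
  // e.g. with UF = 2 for an integer add reduction (names are illustrative):
  //   %bin.rdx = add <4 x i32> %rdx.part1, %rdx.part0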
3890
3891 if (VF > 1) {
3892 bool NoNaN = Legal->hasFunNoNaNAttr();
3893 ReducedPartRdx =
3894 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3895 // If the reduction can be performed in a smaller type, we need to extend
3896 // the reduction to the wider type before we branch to the original loop.
3897 if (Phi->getType() != RdxDesc.getRecurrenceType())
3898 ReducedPartRdx =
3899 RdxDesc.isSigned()
3900 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3901 : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3902 }
3903
3904 // Create a phi node that merges control-flow from the backedge-taken check
3905 // block and the middle block.
3906 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3907 LoopScalarPreHeader->getTerminator());
3908 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3909 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3910 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3911
3912 // Now, we need to fix the users of the reduction variable
3913 // inside and outside of the scalar remainder loop.
3914 // We know that the loop is in LCSSA form. We need to update the
3915 // PHI nodes in the exit blocks.
3916 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3917 // All PHINodes need to have a single entry edge, or two if
3918 // we already fixed them.
3919 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3920
3921 // We found a reduction value exit-PHI. Update it with the
3922 // incoming bypass edge.
3923 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3924 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3925 } // end of the LCSSA phi scan.
3926
3927 // Fix the scalar loop reduction variable with the incoming reduction sum
3928 // from the vector body and from the backedge value.
3929 int IncomingEdgeBlockIdx =
3930 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3931 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3932 // Pick the other block.
3933 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3934 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3935 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3936 }
3937
void InnerLoopVectorizer::clearReductionWrapFlags(
    RecurrenceDescriptor &RdxDesc) {
3940 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3941 if (RK != RecurrenceDescriptor::RK_IntegerAdd &&
3942 RK != RecurrenceDescriptor::RK_IntegerMult)
3943 return;
3944
3945 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
3946 assert(LoopExitInstr && "null loop exit instruction");
3947 SmallVector<Instruction *, 8> Worklist;
3948 SmallPtrSet<Instruction *, 8> Visited;
3949 Worklist.push_back(LoopExitInstr);
3950 Visited.insert(LoopExitInstr);
3951
3952 while (!Worklist.empty()) {
3953 Instruction *Cur = Worklist.pop_back_val();
3954 if (isa<OverflowingBinaryOperator>(Cur))
3955 for (unsigned Part = 0; Part < UF; ++Part) {
3956 Value *V = getOrCreateVectorValue(Cur, Part);
3957 cast<Instruction>(V)->dropPoisonGeneratingFlags();
3958 }
3959
3960 for (User *U : Cur->users()) {
3961 Instruction *UI = cast<Instruction>(U);
3962 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
3963 Visited.insert(UI).second)
3964 Worklist.push_back(UI);
3965 }
3966 }
3967 }
3968
void InnerLoopVectorizer::fixLCSSAPHIs() {
3970 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3971 if (LCSSAPhi.getNumIncomingValues() == 1) {
3972 auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
3973 // Non-instruction incoming values will have only one value.
3974 unsigned LastLane = 0;
3975 if (isa<Instruction>(IncomingValue))
3976 LastLane = Cost->isUniformAfterVectorization(
3977 cast<Instruction>(IncomingValue), VF)
3978 ? 0
3979 : VF - 1;
3980 // Can be a loop invariant incoming value or the last scalar value to be
3981 // extracted from the vectorized loop.
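      // E.g. with UF = 2 and VF = 4, a non-uniform incoming value reads part 1,
      // lane 3, i.e. the last scalar produced by the vector loop.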
3982 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3983 Value *lastIncomingValue =
3984 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
3985 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
3986 }
3987 }
3988 }
3989
void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
3991 // The basic block and loop containing the predicated instruction.
3992 auto *PredBB = PredInst->getParent();
3993 auto *VectorLoop = LI->getLoopFor(PredBB);
3994
3995 // Initialize a worklist with the operands of the predicated instruction.
3996 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
3997
3998 // Holds instructions that we need to analyze again. An instruction may be
3999 // reanalyzed if we don't yet know if we can sink it or not.
4000 SmallVector<Instruction *, 8> InstsToReanalyze;
4001
4002 // Returns true if a given use occurs in the predicated block. Phi nodes use
4003 // their operands in their corresponding predecessor blocks.
4004 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4005 auto *I = cast<Instruction>(U.getUser());
4006 BasicBlock *BB = I->getParent();
4007 if (auto *Phi = dyn_cast<PHINode>(I))
4008 BB = Phi->getIncomingBlock(
4009 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4010 return BB == PredBB;
4011 };
4012
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist fails to sink a single instruction.
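  // For example (illustrative names), if the predicated block contains
  // %d = sdiv i32 %a, %b and %a = add i32 %x, 1 is used only by %d, the add is
  // sunk into the block and its own operands are reconsidered on the next pass.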
4017 bool Changed;
4018 do {
4019 // Add the instructions that need to be reanalyzed to the worklist, and
4020 // reset the changed indicator.
4021 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4022 InstsToReanalyze.clear();
4023 Changed = false;
4024
4025 while (!Worklist.empty()) {
4026 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4027
4028 // We can't sink an instruction if it is a phi node, is already in the
4029 // predicated block, is not in the loop, or may have side effects.
4030 if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4031 !VectorLoop->contains(I) || I->mayHaveSideEffects())
4032 continue;
4033
4034 // It's legal to sink the instruction if all its uses occur in the
4035 // predicated block. Otherwise, there's nothing to do yet, and we may
4036 // need to reanalyze the instruction.
4037 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4038 InstsToReanalyze.push_back(I);
4039 continue;
4040 }
4041
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4044 I->moveBefore(&*PredBB->getFirstInsertionPt());
4045 Worklist.insert(I->op_begin(), I->op_end());
4046
4047 // The sinking may have enabled other instructions to be sunk, so we will
4048 // need to iterate.
4049 Changed = true;
4050 }
4051 } while (Changed);
4052 }
4053
void InnerLoopVectorizer::fixNonInductionPHIs() {
4055 for (PHINode *OrigPhi : OrigPHIsToFix) {
4056 PHINode *NewPhi =
4057 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4058 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4059
4060 SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4061 predecessors(OrigPhi->getParent()));
4062 SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4063 predecessors(NewPhi->getParent()));
4064 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4065 "Scalar and Vector BB should have the same number of predecessors");
4066
4067 // The insertion point in Builder may be invalidated by the time we get
4068 // here. Force the Builder insertion point to something valid so that we do
4069 // not run into issues during insertion point restore in
4070 // getOrCreateVectorValue calls below.
4071 Builder.SetInsertPoint(NewPhi);
4072
4073 // The predecessor order is preserved and we can rely on mapping between
4074 // scalar and vector block predecessors.
4075 for (unsigned i = 0; i < NumIncomingValues; ++i) {
4076 BasicBlock *NewPredBB = VectorBBPredecessors[i];
4077
4078 // When looking up the new scalar/vector values to fix up, use incoming
4079 // values from original phi.
4080 Value *ScIncV =
4081 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4082
4083 // Scalar incoming value may need a broadcast
4084 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4085 NewPhi->addIncoming(NewIncV, NewPredBB);
4086 }
4087 }
4088 }
4089
void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPUser &Operands,
                                   unsigned UF, unsigned VF,
                                   bool IsPtrLoopInvariant,
                                   SmallBitVector &IsIndexLoopInvariant,
                                   VPTransformState &State) {
4095 // Construct a vector GEP by widening the operands of the scalar GEP as
4096 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4097 // results in a vector of pointers when at least one operand of the GEP
4098 // is vector-typed. Thus, to keep the representation compact, we only use
4099 // vector-typed operands for loop-varying values.
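  // For instance (an illustrative sketch), a GEP with a loop-invariant base
  // and a widened index becomes, for VF = 4:
  //   %vgep = getelementptr i32, i32* %base, <4 x i64> %vec.ind
  // which yields a <4 x i32*> vector of pointers.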
4100
4101 if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4102 // If we are vectorizing, but the GEP has only loop-invariant operands,
4103 // the GEP we build (by only using vector-typed operands for
4104 // loop-varying values) would be a scalar pointer. Thus, to ensure we
4105 // produce a vector of pointers, we need to either arbitrarily pick an
4106 // operand to broadcast, or broadcast a clone of the original GEP.
4107 // Here, we broadcast a clone of the original.
4108 //
4109 // TODO: If at some point we decide to scalarize instructions having
4110 // loop-invariant operands, this special case will no longer be
4111 // required. We would add the scalarization decision to
4112 // collectLoopScalars() and teach getVectorValue() to broadcast
4113 // the lane-zero scalar value.
4114 auto *Clone = Builder.Insert(GEP->clone());
4115 for (unsigned Part = 0; Part < UF; ++Part) {
4116 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4117 VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
4118 addMetadata(EntryPart, GEP);
4119 }
4120 } else {
4121 // If the GEP has at least one loop-varying operand, we are sure to
4122 // produce a vector of pointers. But if we are only unrolling, we want
4123 // to produce a scalar GEP for each unroll part. Thus, the GEP we
4124 // produce with the code below will be scalar (if VF == 1) or vector
4125 // (otherwise). Note that for the unroll-only case, we still maintain
4126 // values in the vector mapping with initVector, as we do for other
4127 // instructions.
4128 for (unsigned Part = 0; Part < UF; ++Part) {
4129 // The pointer operand of the new GEP. If it's loop-invariant, we
4130 // won't broadcast it.
4131 auto *Ptr = IsPtrLoopInvariant ? State.get(Operands.getOperand(0), {0, 0})
4132 : State.get(Operands.getOperand(0), Part);
4133
4134 // Collect all the indices for the new GEP. If any index is
4135 // loop-invariant, we won't broadcast it.
4136 SmallVector<Value *, 4> Indices;
4137 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4138 VPValue *Operand = Operands.getOperand(I);
4139 if (IsIndexLoopInvariant[I - 1])
4140 Indices.push_back(State.get(Operand, {0, 0}));
4141 else
4142 Indices.push_back(State.get(Operand, Part));
4143 }
4144
4145 // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4146 // but it should be a vector, otherwise.
4147 auto *NewGEP =
4148 GEP->isInBounds()
4149 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4150 Indices)
4151 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4152 assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4153 "NewGEP is not a pointer vector");
4154 VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
4155 addMetadata(NewGEP, GEP);
4156 }
4157 }
4158 }
4159
void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF) {
4162 PHINode *P = cast<PHINode>(PN);
4163 if (EnableVPlanNativePath) {
4164 // Currently we enter here in the VPlan-native path for non-induction
4165 // PHIs where all control flow is uniform. We simply widen these PHIs.
4166 // Create a vector phi with no operands - the vector phi operands will be
4167 // set at the end of vector code generation.
4168 Type *VecTy =
4169 (VF == 1) ? PN->getType() : FixedVectorType::get(PN->getType(), VF);
4170 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4171 VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4172 OrigPHIsToFix.push_back(P);
4173
4174 return;
4175 }
4176
4177 assert(PN->getParent() == OrigLoop->getHeader() &&
4178 "Non-header phis should have been handled elsewhere");
4179
4180 // In order to support recurrences we need to be able to vectorize Phi nodes.
4181 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4182 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4183 // this value when we vectorize all of the instructions that use the PHI.
4184 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4185 for (unsigned Part = 0; Part < UF; ++Part) {
4186 // This is phase one of vectorizing PHIs.
4187 Type *VecTy =
4188 (VF == 1) ? PN->getType() : FixedVectorType::get(PN->getType(), VF);
4189 Value *EntryPart = PHINode::Create(
4190 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4191 VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4192 }
4193 return;
4194 }
4195
4196 setDebugLocFromInst(Builder, P);
4197
4198 // This PHINode must be an induction variable.
4199 // Make sure that we know about it.
4200 assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4201
4202 InductionDescriptor II = Legal->getInductionVars().lookup(P);
4203 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4204
4205 // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4206 // which can be found from the original scalar operations.
4207 switch (II.getKind()) {
4208 case InductionDescriptor::IK_NoInduction:
4209 llvm_unreachable("Unknown induction");
4210 case InductionDescriptor::IK_IntInduction:
4211 case InductionDescriptor::IK_FpInduction:
4212 llvm_unreachable("Integer/fp induction is handled elsewhere.");
4213 case InductionDescriptor::IK_PtrInduction: {
4214 // Handle the pointer induction variable case.
4215 assert(P->getType()->isPointerTy() && "Unexpected type.");
4216 // This is the normalized GEP that starts counting at zero.
4217 Value *PtrInd = Induction;
4218 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4219 // Determine the number of scalars we need to generate for each unroll
4220 // iteration. If the instruction is uniform, we only need to generate the
4221 // first lane. Otherwise, we generate all VF values.
4222 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4223 // These are the scalar results. Notice that we don't generate vector GEPs
4224 // because scalar GEPs result in better code.
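    // E.g. with UF = 2 and VF = 4 and a non-uniform pointer induction, eight
    // scalar "next.gep" values are emitted at offsets PtrInd + 0 .. PtrInd + 7;
    // if the phi is uniform, only the lane-0 value of each part is needed.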
4225 for (unsigned Part = 0; Part < UF; ++Part) {
4226 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4227 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4228 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4229 Value *SclrGep =
4230 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4231 SclrGep->setName("next.gep");
4232 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4233 }
4234 }
4235 return;
4236 }
4237 }
4238 }
4239
4240 /// A helper function for checking whether an integer division-related
4241 /// instruction may divide by zero (in which case it must be predicated if
4242 /// executed conditionally in the scalar code).
4243 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so w/o predication.
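/// For example, 'udiv i32 %x, 7' has a known non-zero divisor and needs no
/// predication, whereas 'udiv i32 %x, %y' does, since %y may be zero on an
/// iteration the scalar code would never have executed.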
static bool mayDivideByZero(Instruction &I) {
4248 assert((I.getOpcode() == Instruction::UDiv ||
4249 I.getOpcode() == Instruction::SDiv ||
4250 I.getOpcode() == Instruction::URem ||
4251 I.getOpcode() == Instruction::SRem) &&
4252 "Unexpected instruction");
4253 Value *Divisor = I.getOperand(1);
4254 auto *CInt = dyn_cast<ConstantInt>(Divisor);
4255 return !CInt || CInt->isZero();
4256 }
4257
void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User,
                                           VPTransformState &State) {
4260 switch (I.getOpcode()) {
4261 case Instruction::Call:
4262 case Instruction::Br:
4263 case Instruction::PHI:
4264 case Instruction::GetElementPtr:
4265 case Instruction::Select:
4266 llvm_unreachable("This instruction is handled by a different recipe.");
4267 case Instruction::UDiv:
4268 case Instruction::SDiv:
4269 case Instruction::SRem:
4270 case Instruction::URem:
4271 case Instruction::Add:
4272 case Instruction::FAdd:
4273 case Instruction::Sub:
4274 case Instruction::FSub:
4275 case Instruction::FNeg:
4276 case Instruction::Mul:
4277 case Instruction::FMul:
4278 case Instruction::FDiv:
4279 case Instruction::FRem:
4280 case Instruction::Shl:
4281 case Instruction::LShr:
4282 case Instruction::AShr:
4283 case Instruction::And:
4284 case Instruction::Or:
4285 case Instruction::Xor: {
4286 // Just widen unops and binops.
4287 setDebugLocFromInst(Builder, &I);
4288
4289 for (unsigned Part = 0; Part < UF; ++Part) {
4290 SmallVector<Value *, 2> Ops;
4291 for (VPValue *VPOp : User.operands())
4292 Ops.push_back(State.get(VPOp, Part));
4293
4294 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4295
4296 if (auto *VecOp = dyn_cast<Instruction>(V))
4297 VecOp->copyIRFlags(&I);
4298
4299 // Use this vector value for all users of the original instruction.
4300 VectorLoopValueMap.setVectorValue(&I, Part, V);
4301 addMetadata(V, &I);
4302 }
4303
4304 break;
4305 }
4306 case Instruction::ICmp:
4307 case Instruction::FCmp: {
4308 // Widen compares. Generate vector compares.
4309 bool FCmp = (I.getOpcode() == Instruction::FCmp);
4310 auto *Cmp = cast<CmpInst>(&I);
4311 setDebugLocFromInst(Builder, Cmp);
4312 for (unsigned Part = 0; Part < UF; ++Part) {
4313 Value *A = State.get(User.getOperand(0), Part);
4314 Value *B = State.get(User.getOperand(1), Part);
4315 Value *C = nullptr;
4316 if (FCmp) {
4317 // Propagate fast math flags.
4318 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4319 Builder.setFastMathFlags(Cmp->getFastMathFlags());
4320 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4321 } else {
4322 C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4323 }
4324 VectorLoopValueMap.setVectorValue(&I, Part, C);
4325 addMetadata(C, &I);
4326 }
4327
4328 break;
4329 }
4330
4331 case Instruction::ZExt:
4332 case Instruction::SExt:
4333 case Instruction::FPToUI:
4334 case Instruction::FPToSI:
4335 case Instruction::FPExt:
4336 case Instruction::PtrToInt:
4337 case Instruction::IntToPtr:
4338 case Instruction::SIToFP:
4339 case Instruction::UIToFP:
4340 case Instruction::Trunc:
4341 case Instruction::FPTrunc:
4342 case Instruction::BitCast: {
4343 auto *CI = cast<CastInst>(&I);
4344 setDebugLocFromInst(Builder, CI);
4345
4346 /// Vectorize casts.
4347 Type *DestTy =
4348 (VF == 1) ? CI->getType() : FixedVectorType::get(CI->getType(), VF);
4349
4350 for (unsigned Part = 0; Part < UF; ++Part) {
4351 Value *A = State.get(User.getOperand(0), Part);
4352 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4353 VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4354 addMetadata(Cast, &I);
4355 }
4356 break;
4357 }
4358 default:
4359 // This instruction is not vectorized by simple widening.
4360 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4361 llvm_unreachable("Unhandled instruction!");
4362 } // end of switch.
4363 }
4364
void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
                                               VPTransformState &State) {
4367 assert(!isa<DbgInfoIntrinsic>(I) &&
4368 "DbgInfoIntrinsic should have been dropped during VPlan construction");
4369 setDebugLocFromInst(Builder, &I);
4370
4371 Module *M = I.getParent()->getParent()->getParent();
4372 auto *CI = cast<CallInst>(&I);
4373
4374 SmallVector<Type *, 4> Tys;
4375 for (Value *ArgOperand : CI->arg_operands())
4376 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4377
4378 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4379
  // This flag shows whether we use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e. whether the intrinsic call is
  // at least as cheap as the library call.
4383 bool NeedToScalarize = false;
4384 unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4385 bool UseVectorIntrinsic =
4386 ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4387 assert((UseVectorIntrinsic || !NeedToScalarize) &&
4388 "Instruction should be scalarized elsewhere.");
4389
4390 for (unsigned Part = 0; Part < UF; ++Part) {
4391 SmallVector<Value *, 4> Args;
4392 for (auto &I : enumerate(ArgOperands.operands())) {
4393 // Some intrinsics have a scalar argument - don't replace it with a
4394 // vector.
4395 Value *Arg;
4396 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4397 Arg = State.get(I.value(), Part);
4398 else
4399 Arg = State.get(I.value(), {0, 0});
4400 Args.push_back(Arg);
4401 }
4402
4403 Function *VectorF;
4404 if (UseVectorIntrinsic) {
4405 // Use vector version of the intrinsic.
4406 Type *TysForDecl[] = {CI->getType()};
4407 if (VF > 1)
4408 TysForDecl[0] =
4409 FixedVectorType::get(CI->getType()->getScalarType(), VF);
4410 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4411 assert(VectorF && "Can't retrieve vector intrinsic.");
4412 } else {
4413 // Use vector version of the function call.
4414 const VFShape Shape =
4415 VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
4416 #ifndef NDEBUG
4417 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4418 "Can't create vector function.");
4419 #endif
4420 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4421 }
4422 SmallVector<OperandBundleDef, 1> OpBundles;
4423 CI->getOperandBundlesAsDefs(OpBundles);
4424 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4425
4426 if (isa<FPMathOperator>(V))
4427 V->copyFastMathFlags(CI);
4428
4429 VectorLoopValueMap.setVectorValue(&I, Part, V);
4430 addMetadata(V, &I);
4431 }
4432 }
4433
void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
                                                 VPUser &Operands,
                                                 bool InvariantCond,
                                                 VPTransformState &State) {
4438 setDebugLocFromInst(Builder, &I);
4439
4440 // The condition can be loop invariant but still defined inside the
4441 // loop. This means that we can't just use the original 'cond' value.
4442 // We have to take the 'vectorized' value and pick the first lane.
4443 // Instcombine will make this a no-op.
4444 auto *InvarCond =
4445 InvariantCond ? State.get(Operands.getOperand(0), {0, 0}) : nullptr;
4446
4447 for (unsigned Part = 0; Part < UF; ++Part) {
4448 Value *Cond =
4449 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
4450 Value *Op0 = State.get(Operands.getOperand(1), Part);
4451 Value *Op1 = State.get(Operands.getOperand(2), Part);
4452 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
4453 VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4454 addMetadata(Sel, &I);
4455 }
4456 }
4457
void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4459 // We should not collect Scalars more than once per VF. Right now, this
4460 // function is called from collectUniformsAndScalars(), which already does
4461 // this check. Collecting Scalars for VF=1 does not make any sense.
4462 assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4463 "This function should not be visited twice for the same VF");
4464
4465 SmallSetVector<Instruction *, 8> Worklist;
4466
4467 // These sets are used to seed the analysis with pointers used by memory
4468 // accesses that will remain scalar.
4469 SmallSetVector<Instruction *, 8> ScalarPtrs;
4470 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4471
4472 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4473 // The pointer operands of loads and stores will be scalar as long as the
4474 // memory access is not a gather or scatter operation. The value operand of a
4475 // store will remain scalar if the store is scalarized.
4476 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4477 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4478 assert(WideningDecision != CM_Unknown &&
4479 "Widening decision should be ready at this moment");
4480 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4481 if (Ptr == Store->getValueOperand())
4482 return WideningDecision == CM_Scalarize;
4483 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4484 "Ptr is neither a value or pointer operand");
4485 return WideningDecision != CM_GatherScatter;
4486 };
4487
4488 // A helper that returns true if the given value is a bitcast or
4489 // getelementptr instruction contained in the loop.
4490 auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4491 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4492 isa<GetElementPtrInst>(V)) &&
4493 !TheLoop->isLoopInvariant(V);
4494 };
4495
4496 // A helper that evaluates a memory access's use of a pointer. If the use
4497 // will be a scalar use, and the pointer is only used by memory accesses, we
4498 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4499 // PossibleNonScalarPtrs.
4500 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4501 // We only care about bitcast and getelementptr instructions contained in
4502 // the loop.
4503 if (!isLoopVaryingBitCastOrGEP(Ptr))
4504 return;
4505
4506 // If the pointer has already been identified as scalar (e.g., if it was
4507 // also identified as uniform), there's nothing to do.
4508 auto *I = cast<Instruction>(Ptr);
4509 if (Worklist.count(I))
4510 return;
4511
4512 // If the use of the pointer will be a scalar use, and all users of the
4513 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4514 // place the pointer in PossibleNonScalarPtrs.
4515 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4516 return isa<LoadInst>(U) || isa<StoreInst>(U);
4517 }))
4518 ScalarPtrs.insert(I);
4519 else
4520 PossibleNonScalarPtrs.insert(I);
4521 };
4522
4523 // We seed the scalars analysis with three classes of instructions: (1)
4524 // instructions marked uniform-after-vectorization, (2) bitcast and
4525 // getelementptr instructions used by memory accesses requiring a scalar use,
4526 // and (3) pointer induction variables and their update instructions (we
4527 // currently only scalarize these).
4528 //
4529 // (1) Add to the worklist all instructions that have been identified as
4530 // uniform-after-vectorization.
4531 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4532
  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
4538 for (auto *BB : TheLoop->blocks())
4539 for (auto &I : *BB) {
4540 if (auto *Load = dyn_cast<LoadInst>(&I)) {
4541 evaluatePtrUse(Load, Load->getPointerOperand());
4542 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4543 evaluatePtrUse(Store, Store->getPointerOperand());
4544 evaluatePtrUse(Store, Store->getValueOperand());
4545 }
4546 }
4547 for (auto *I : ScalarPtrs)
4548 if (!PossibleNonScalarPtrs.count(I)) {
4549 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4550 Worklist.insert(I);
4551 }
4552
4553 // (3) Add to the worklist all pointer induction variables and their update
4554 // instructions.
4555 //
4556 // TODO: Once we are able to vectorize pointer induction variables we should
4557 // no longer insert them into the worklist here.
4558 auto *Latch = TheLoop->getLoopLatch();
4559 for (auto &Induction : Legal->getInductionVars()) {
4560 auto *Ind = Induction.first;
4561 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4562 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
4563 continue;
4564 Worklist.insert(Ind);
4565 Worklist.insert(IndUpdate);
4566 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4567 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4568 << "\n");
4569 }
4570
4571 // Insert the forced scalars.
4572 // FIXME: Currently widenPHIInstruction() often creates a dead vector
4573 // induction variable when the PHI user is scalarized.
4574 auto ForcedScalar = ForcedScalars.find(VF);
4575 if (ForcedScalar != ForcedScalars.end())
4576 for (auto *I : ForcedScalar->second)
4577 Worklist.insert(I);
4578
4579 // Expand the worklist by looking through any bitcasts and getelementptr
4580 // instructions we've already identified as scalar. This is similar to the
4581 // expansion step in collectLoopUniforms(); however, here we're only
4582 // expanding to include additional bitcasts and getelementptr instructions.
4583 unsigned Idx = 0;
4584 while (Idx != Worklist.size()) {
4585 Instruction *Dst = Worklist[Idx++];
4586 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4587 continue;
4588 auto *Src = cast<Instruction>(Dst->getOperand(0));
4589 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4590 auto *J = cast<Instruction>(U);
4591 return !TheLoop->contains(J) || Worklist.count(J) ||
4592 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4593 isScalarUse(J, Src));
4594 })) {
4595 Worklist.insert(Src);
4596 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4597 }
4598 }
4599
4600 // An induction variable will remain scalar if all users of the induction
4601 // variable and induction variable update remain scalar.
4602 for (auto &Induction : Legal->getInductionVars()) {
4603 auto *Ind = Induction.first;
4604 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4605
4606 // We already considered pointer induction variables, so there's no reason
4607 // to look at their users again.
4608 //
4609 // TODO: Once we are able to vectorize pointer induction variables we
4610 // should no longer skip over them here.
4611 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
4612 continue;
4613
4614 // If tail-folding is applied, the primary induction variable will be used
4615 // to feed a vector compare.
4616 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4617 continue;
4618
4619 // Determine if all users of the induction variable are scalar after
4620 // vectorization.
4621 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4622 auto *I = cast<Instruction>(U);
4623 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4624 });
4625 if (!ScalarInd)
4626 continue;
4627
4628 // Determine if all users of the induction variable update instruction are
4629 // scalar after vectorization.
4630 auto ScalarIndUpdate =
4631 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4632 auto *I = cast<Instruction>(U);
4633 return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4634 });
4635 if (!ScalarIndUpdate)
4636 continue;
4637
4638 // The induction variable and its update instruction will remain scalar.
4639 Worklist.insert(Ind);
4640 Worklist.insert(IndUpdate);
4641 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4642 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4643 << "\n");
4644 }
4645
4646 Scalars[VF].insert(Worklist.begin(), Worklist.end());
4647 }
4648
bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) {
4650 if (!blockNeedsPredication(I->getParent()))
4651 return false;
4652 switch(I->getOpcode()) {
4653 default:
4654 break;
4655 case Instruction::Load:
4656 case Instruction::Store: {
4657 if (!Legal->isMaskRequired(I))
4658 return false;
4659 auto *Ptr = getLoadStorePointerOperand(I);
4660 auto *Ty = getMemInstValueType(I);
4661 // We have already decided how to vectorize this instruction, get that
4662 // result.
4663 if (VF > 1) {
4664 InstWidening WideningDecision = getWideningDecision(I, VF);
4665 assert(WideningDecision != CM_Unknown &&
4666 "Widening decision should be ready at this moment");
4667 return WideningDecision == CM_Scalarize;
4668 }
4669 const Align Alignment = getLoadStoreAlignment(I);
4670 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4671 isLegalMaskedGather(Ty, Alignment))
4672 : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4673 isLegalMaskedScatter(Ty, Alignment));
4674 }
4675 case Instruction::UDiv:
4676 case Instruction::SDiv:
4677 case Instruction::SRem:
4678 case Instruction::URem:
4679 return mayDivideByZero(*I);
4680 }
4681 return false;
4682 }
4683
bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
                                                                unsigned VF) {
4686 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4687 assert(getWideningDecision(I, VF) == CM_Unknown &&
4688 "Decision should not be set yet.");
4689 auto *Group = getInterleavedAccessGroup(I);
4690 assert(Group && "Must have a group.");
4691
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
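  // (E.g. an i24 value occupies 3 bytes of data in a 4-byte allocation, so
  // consecutive in-memory elements do not match the layout of a <VF x i24>
  // vector.)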
4694 auto &DL = I->getModule()->getDataLayout();
4695 auto *ScalarTy = getMemInstValueType(I);
4696 if (hasIrregularType(ScalarTy, DL, VF))
4697 return false;
4698
4699 // Check if masking is required.
4700 // A Group may need masking for one of two reasons: it resides in a block that
4701 // needs predication, or it was decided to use masking to deal with gaps.
4702 bool PredicatedAccessRequiresMasking =
4703 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4704 bool AccessWithGapsRequiresMasking =
4705 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
4706 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4707 return true;
4708
4709 // If masked interleaving is required, we expect that the user/target had
4710 // enabled it, because otherwise it either wouldn't have been created or
4711 // it should have been invalidated by the CostModel.
4712 assert(useMaskedInterleavedAccesses(TTI) &&
4713 "Masked interleave-groups for predicated accesses are not enabled.");
4714
4715 auto *Ty = getMemInstValueType(I);
4716 const Align Alignment = getLoadStoreAlignment(I);
4717 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4718 : TTI.isLegalMaskedStore(Ty, Alignment);
4719 }
4720
bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
                                                                unsigned VF) {
4723 // Get and ensure we have a valid memory instruction.
4724 LoadInst *LI = dyn_cast<LoadInst>(I);
4725 StoreInst *SI = dyn_cast<StoreInst>(I);
4726 assert((LI || SI) && "Invalid memory instruction");
4727
4728 auto *Ptr = getLoadStorePointerOperand(I);
4729
4730 // In order to be widened, the pointer should be consecutive, first of all.
4731 if (!Legal->isConsecutivePtr(Ptr))
4732 return false;
4733
4734 // If the instruction is a store located in a predicated block, it will be
4735 // scalarized.
4736 if (isScalarWithPredication(I))
4737 return false;
4738
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
4741 auto &DL = I->getModule()->getDataLayout();
4742 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4743 if (hasIrregularType(ScalarTy, DL, VF))
4744 return false;
4745
4746 return true;
4747 }
4748
void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4750 // We should not collect Uniforms more than once per VF. Right now,
4751 // this function is called from collectUniformsAndScalars(), which
4752 // already does this check. Collecting Uniforms for VF=1 does not make any
4753 // sense.
4754
4755 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4756 "This function should not be visited twice for the same VF");
4757
  // Visit the list of Uniforms. If we do not find any uniform value, we will
  // not analyze it again; Uniforms.count(VF) will still return 1.
4760 Uniforms[VF].clear();
4761
4762 // We now know that the loop is vectorizable!
4763 // Collect instructions inside the loop that will remain uniform after
4764 // vectorization.
4765
4766 // Global values, params and instructions outside of current loop are out of
4767 // scope.
4768 auto isOutOfScope = [&](Value *V) -> bool {
4769 Instruction *I = dyn_cast<Instruction>(V);
4770 return (!I || !TheLoop->contains(I));
4771 };
4772
4773 SetVector<Instruction *> Worklist;
4774 BasicBlock *Latch = TheLoop->getLoopLatch();
4775
4776 // Instructions that are scalar with predication must not be considered
4777 // uniform after vectorization, because that would create an erroneous
4778 // replicating region where only a single instance out of VF should be formed.
4779 // TODO: optimize such seldom cases if found important, see PR40816.
4780 auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4781 if (isScalarWithPredication(I, VF)) {
4782 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4783 << *I << "\n");
4784 return;
4785 }
4786 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4787 Worklist.insert(I);
4788 };
4789
4790 // Start with the conditional branch. If the branch condition is an
4791 // instruction contained in the loop that is only used by the branch, it is
4792 // uniform.
4793 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4794 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4795 addToWorklistIfAllowed(Cmp);
4796
4797 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4798 // are pointers that are treated like consecutive pointers during
4799 // vectorization. The pointer operands of interleaved accesses are an
4800 // example.
4801 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4802
4803 // Holds pointer operands of instructions that are possibly non-uniform.
4804 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4805
4806 auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4807 InstWidening WideningDecision = getWideningDecision(I, VF);
4808 assert(WideningDecision != CM_Unknown &&
4809 "Widening decision should be ready at this moment");
4810
4811 return (WideningDecision == CM_Widen ||
4812 WideningDecision == CM_Widen_Reverse ||
4813 WideningDecision == CM_Interleave);
4814 };
4815 // Iterate over the instructions in the loop, and collect all
4816 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4817 // that a consecutive-like pointer operand will be scalarized, we collect it
4818 // in PossibleNonUniformPtrs instead. We use two sets here because a single
4819 // getelementptr instruction can be used by both vectorized and scalarized
4820 // memory instructions. For example, if a loop loads and stores from the same
4821 // location, but the store is conditional, the store will be scalarized, and
4822 // the getelementptr won't remain uniform.
4823 for (auto *BB : TheLoop->blocks())
4824 for (auto &I : *BB) {
4825 // If there's no pointer operand, there's nothing to do.
4826 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4827 if (!Ptr)
4828 continue;
4829
4830 // True if all users of Ptr are memory accesses that have Ptr as their
4831 // pointer operand.
4832 auto UsersAreMemAccesses =
4833 llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4834 return getLoadStorePointerOperand(U) == Ptr;
4835 });
4836
4837 // Ensure the memory instruction will not be scalarized or used by
4838 // gather/scatter, making its pointer operand non-uniform. If the pointer
4839 // operand is used by any instruction other than a memory access, we
4840 // conservatively assume the pointer operand may be non-uniform.
4841 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4842 PossibleNonUniformPtrs.insert(Ptr);
4843
4844 // If the memory instruction will be vectorized and its pointer operand
4845 // is consecutive-like, or interleaving - the pointer operand should
4846 // remain uniform.
4847 else
4848 ConsecutiveLikePtrs.insert(Ptr);
4849 }
4850
4851 // Add to the Worklist all consecutive and consecutive-like pointers that
4852 // aren't also identified as possibly non-uniform.
4853 for (auto *V : ConsecutiveLikePtrs)
4854 if (!PossibleNonUniformPtrs.count(V))
4855 addToWorklistIfAllowed(V);
4856
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
4860 unsigned idx = 0;
4861 while (idx != Worklist.size()) {
4862 Instruction *I = Worklist[idx++];
4863
4864 for (auto OV : I->operand_values()) {
4865 // isOutOfScope operands cannot be uniform instructions.
4866 if (isOutOfScope(OV))
4867 continue;
4868 // First order recurrence Phi's should typically be considered
4869 // non-uniform.
4870 auto *OP = dyn_cast<PHINode>(OV);
4871 if (OP && Legal->isFirstOrderRecurrence(OP))
4872 continue;
4873 // If all the users of the operand are uniform, then add the
4874 // operand into the uniform worklist.
4875 auto *OI = cast<Instruction>(OV);
4876 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4877 auto *J = cast<Instruction>(U);
4878 return Worklist.count(J) ||
4879 (OI == getLoadStorePointerOperand(J) &&
4880 isUniformDecision(J, VF));
4881 }))
4882 addToWorklistIfAllowed(OI);
4883 }
4884 }
4885
4886 // Returns true if Ptr is the pointer operand of a memory access instruction
4887 // I, and I is known to not require scalarization.
4888 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4889 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4890 };
4891
4892 // For an instruction to be added into Worklist above, all its users inside
4893 // the loop should also be in Worklist. However, this condition cannot be
4894 // true for phi nodes that form a cyclic dependence. We must process phi
4895 // nodes separately. An induction variable will remain uniform if all users
4896 // of the induction variable and induction variable update remain uniform.
4897 // The code below handles both pointer and non-pointer induction variables.
4898 for (auto &Induction : Legal->getInductionVars()) {
4899 auto *Ind = Induction.first;
4900 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4901
4902 // Determine if all users of the induction variable are uniform after
4903 // vectorization.
4904 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4905 auto *I = cast<Instruction>(U);
4906 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4907 isVectorizedMemAccessUse(I, Ind);
4908 });
4909 if (!UniformInd)
4910 continue;
4911
4912 // Determine if all users of the induction variable update instruction are
4913 // uniform after vectorization.
4914 auto UniformIndUpdate =
4915 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4916 auto *I = cast<Instruction>(U);
4917 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4918 isVectorizedMemAccessUse(I, IndUpdate);
4919 });
4920 if (!UniformIndUpdate)
4921 continue;
4922
4923 // The induction variable and its update instruction will remain uniform.
4924 addToWorklistIfAllowed(Ind);
4925 addToWorklistIfAllowed(IndUpdate);
4926 }
4927
4928 Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4929 }
4930
bool LoopVectorizationCostModel::runtimeChecksRequired() {
4932 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4933
4934 if (Legal->getRuntimePointerChecking()->Need) {
4935 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4936 "runtime pointer checks needed. Enable vectorization of this "
4937 "loop with '#pragma clang loop vectorize(enable)' when "
4938 "compiling with -Os/-Oz",
4939 "CantVersionLoopWithOptForSize", ORE, TheLoop);
4940 return true;
4941 }
4942
4943 if (!PSE.getUnionPredicate().getPredicates().empty()) {
4944 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4945 "runtime SCEV checks needed. Enable vectorization of this "
4946 "loop with '#pragma clang loop vectorize(enable)' when "
4947 "compiling with -Os/-Oz",
4948 "CantVersionLoopWithOptForSize", ORE, TheLoop);
4949 return true;
4950 }
4951
4952 // FIXME: Avoid specializing for stride==1 instead of bailing out.
4953 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4954 reportVectorizationFailure("Runtime stride check for small trip count",
4955 "runtime stride == 1 checks needed. Enable vectorization of "
4956 "this loop without such check by compiling with -Os/-Oz",
4957 "CantVersionLoopWithOptForSize", ORE, TheLoop);
4958 return true;
4959 }
4960
4961 return false;
4962 }
4963
Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(unsigned UserVF,
                                                            unsigned UserIC) {
4966 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the check is still likely to be
    // dynamically uniform if the target can skip it.
4969 reportVectorizationFailure(
4970 "Not inserting runtime ptr check for divergent target",
4971 "runtime pointer checks needed. Not enabled for divergent target",
4972 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
4973 return None;
4974 }
4975
4976 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4977 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4978 if (TC == 1) {
4979 reportVectorizationFailure("Single iteration (non) loop",
4980 "loop trip count is one, irrelevant for vectorization",
4981 "SingleIterationLoop", ORE, TheLoop);
4982 return None;
4983 }
4984
4985 switch (ScalarEpilogueStatus) {
4986 case CM_ScalarEpilogueAllowed:
4987 return UserVF ? UserVF : computeFeasibleMaxVF(TC);
4988 case CM_ScalarEpilogueNotNeededUsePredicate:
4989 LLVM_DEBUG(
4990 dbgs() << "LV: vector predicate hint/switch found.\n"
4991 << "LV: Not allowing scalar epilogue, creating predicated "
4992 << "vector loop.\n");
4993 break;
4994 case CM_ScalarEpilogueNotAllowedLowTripLoop:
4995 // fallthrough as a special case of OptForSize
4996 case CM_ScalarEpilogueNotAllowedOptSize:
4997 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
4998 LLVM_DEBUG(
4999 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5000 else
5001 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5002 << "count.\n");
5003
5004 // Bail if runtime checks are required, which are not good when optimising
5005 // for size.
5006 if (runtimeChecksRequired())
5007 return None;
5008 break;
5009 }
5010
5011 // Now try the tail folding
5012
5013 // Invalidate interleave groups that require an epilogue if we can't mask
5014 // the interleave-group.
5015 if (!useMaskedInterleavedAccesses(TTI)) {
5016 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5017 "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here, as
    // none were taken so far.
5020 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5021 }
5022
5023 unsigned MaxVF = UserVF ? UserVF : computeFeasibleMaxVF(TC);
5024 assert((UserVF || isPowerOf2_32(MaxVF)) && "MaxVF must be a power of 2");
5025 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
5026 if (TC > 0 && TC % MaxVFtimesIC == 0) {
5027 // Accept MaxVF if we do not have a tail.
5028 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5029 return MaxVF;
5030 }
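  // E.g. TC = 64 with MaxVF = 8 and UserIC = 2 gives MaxVFtimesIC = 16 and no
  // tail (64 % 16 == 0); TC = 60 would leave a tail, which we try to fold by
  // masking below.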
5031
5032 // If we don't know the precise trip count, or if the trip count that we
5033 // found modulo the vectorization factor is not zero, try to fold the tail
5034 // by masking.
5035 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5036 if (Legal->prepareToFoldTailByMasking()) {
5037 FoldTailByMasking = true;
5038 return MaxVF;
5039 }
5040
5041 if (TC == 0) {
5042 reportVectorizationFailure(
5043 "Unable to calculate the loop count due to complex control flow",
5044 "unable to calculate the loop count due to complex control flow",
5045 "UnknownLoopCountComplexCFG", ORE, TheLoop);
5046 return None;
5047 }
5048
5049 reportVectorizationFailure(
5050 "Cannot optimize for size and vectorize at the same time.",
5051 "cannot optimize for size and vectorize at the same time. "
5052 "Enable vectorization of this loop with '#pragma clang loop "
5053 "vectorize(enable)' when compiling with -Os/-Oz",
5054 "NoTailLoopWithOptForSize", ORE, TheLoop);
5055 return None;
5056 }
5057
unsigned
LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
5060 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5061 unsigned SmallestType, WidestType;
5062 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5063 unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5064
  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
5069 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
5070
5071 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
5072
  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
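  // E.g. a 256-bit register clamped to a 96-bit safe dependence distance with
  // a widest type of 32 bits gives 96 / 32 = 3, rounded down to a
  // MaxVectorSize of 2.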
5075 unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType);
5076
5077 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5078 << " / " << WidestType << " bits.\n");
5079 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5080 << WidestRegister << " bits.\n");
5081
5082 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
5083 " into one vector!");
5084 if (MaxVectorSize == 0) {
5085 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5086 MaxVectorSize = 1;
5087 return MaxVectorSize;
5088 } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5089 isPowerOf2_32(ConstTripCount)) {
5090 // We need to clamp the VF to be the ConstTripCount. There is no point in
5091 // choosing a higher viable VF as done in the loop below.
5092 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5093 << ConstTripCount << "\n");
5094 MaxVectorSize = ConstTripCount;
5095 return MaxVectorSize;
5096 }
5097
5098 unsigned MaxVF = MaxVectorSize;
5099 if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5100 (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5101 // Collect all viable vectorization factors larger than the default MaxVF
5102 // (i.e. MaxVectorSize).
5103 SmallVector<unsigned, 8> VFs;
5104 unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5105 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5106 VFs.push_back(VS);
5107
5108 // For each VF calculate its register usage.
5109 auto RUs = calculateRegisterUsage(VFs);
5110
5111 // Select the largest VF which doesn't require more registers than existing
5112 // ones.
5113 for (int i = RUs.size() - 1; i >= 0; --i) {
5114 bool Selected = true;
5115 for (auto& pair : RUs[i].MaxLocalUsers) {
5116 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5117 if (pair.second > TargetNumRegisters)
5118 Selected = false;
5119 }
5120 if (Selected) {
5121 MaxVF = VFs[i];
5122 break;
5123 }
5124 }
5125 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5126 if (MaxVF < MinVF) {
5127 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5128 << ") with target's minimum: " << MinVF << '\n');
5129 MaxVF = MinVF;
5130 }
5131 }
5132 }
5133 return MaxVF;
5134 }
5135
VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5138 float Cost = expectedCost(1).first;
5139 const float ScalarCost = Cost;
5140 unsigned Width = 1;
5141 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5142
5143 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5144 if (ForceVectorization && MaxVF > 1) {
5145 // Ignore scalar width, because the user explicitly wants vectorization.
5146 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5147 // evaluation.
5148 Cost = std::numeric_limits<float>::max();
5149 }
5150
5151 for (unsigned i = 2; i <= MaxVF; i *= 2) {
5152     // Notice that the vector loop needs to be executed fewer times, so
5153     // we need to divide the cost of the vector loop by the number of
5154     // vector elements (the width).
5155 VectorizationCostTy C = expectedCost(i);
5156 float VectorCost = C.first / (float)i;
5157 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5158 << " costs: " << (int)VectorCost << ".\n");
5159 if (!C.second && !ForceVectorization) {
5160 LLVM_DEBUG(
5161 dbgs() << "LV: Not considering vector loop of width " << i
5162 << " because it will not generate any vector instructions.\n");
5163 continue;
5164 }
5165 if (VectorCost < Cost) {
5166 Cost = VectorCost;
5167 Width = i;
5168 }
5169 }
5170
5171 if (!EnableCondStoresVectorization && NumPredStores) {
5172 reportVectorizationFailure("There are conditional stores.",
5173 "store that is conditionally executed prevents vectorization",
5174 "ConditionalStore", ORE, TheLoop);
5175 Width = 1;
5176 Cost = ScalarCost;
5177 }
5178
5179 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5180 << "LV: Vectorization seems to be not beneficial, "
5181 << "but was forced by a user.\n");
5182 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5183 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5184 return Factor;
5185 }
5186
5187 std::pair<unsigned, unsigned>
5188 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5189 unsigned MinWidth = -1U;
5190 unsigned MaxWidth = 8;
5191 const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5192
5193 // For each block.
5194 for (BasicBlock *BB : TheLoop->blocks()) {
5195 // For each instruction in the loop.
5196 for (Instruction &I : BB->instructionsWithoutDebug()) {
5197 Type *T = I.getType();
5198
5199 // Skip ignored values.
5200 if (ValuesToIgnore.count(&I))
5201 continue;
5202
5203 // Only examine Loads, Stores and PHINodes.
5204 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5205 continue;
5206
5207 // Examine PHI nodes that are reduction variables. Update the type to
5208 // account for the recurrence type.
5209 if (auto *PN = dyn_cast<PHINode>(&I)) {
5210 if (!Legal->isReductionVariable(PN))
5211 continue;
5212 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5213 T = RdxDesc.getRecurrenceType();
5214 }
5215
5216 // Examine the stored values.
5217 if (auto *ST = dyn_cast<StoreInst>(&I))
5218 T = ST->getValueOperand()->getType();
5219
5220 // Ignore loaded pointer types and stored pointer types that are not
5221 // vectorizable.
5222 //
5223 // FIXME: The check here attempts to predict whether a load or store will
5224 // be vectorized. We only know this for certain after a VF has
5225 // been selected. Here, we assume that if an access can be
5226 // vectorized, it will be. We should also look at extending this
5227 // optimization to non-pointer types.
5228 //
5229 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5230 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5231 continue;
5232
5233 MinWidth = std::min(MinWidth,
5234 (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5235 MaxWidth = std::max(MaxWidth,
5236 (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5237 }
5238 }
5239
5240 return {MinWidth, MaxWidth};
5241 }
5242
5243 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
5244 unsigned LoopCost) {
5245 // -- The interleave heuristics --
5246 // We interleave the loop in order to expose ILP and reduce the loop overhead.
5247 // There are many micro-architectural considerations that we can't predict
5248 // at this level. For example, frontend pressure (on decode or fetch) due to
5249 // code size, or the number and capabilities of the execution ports.
5250 //
5251 // We use the following heuristics to select the interleave count:
5252 // 1. If the code has reductions, then we interleave to break the cross
5253 // iteration dependency.
5254 // 2. If the loop is really small, then we interleave to reduce the loop
5255 // overhead.
5256 // 3. We don't interleave if we think that we will spill registers to memory
5257 // due to the increased register pressure.
5258
5259 if (!isScalarEpilogueAllowed())
5260 return 1;
5261
5262   // The maximum safe dependence distance already bounds the width; do not
5262   // interleave on top of that.
5263 if (Legal->getMaxSafeDepDistBytes() != -1U)
5264 return 1;
5265
5266 // Do not interleave loops with a relatively small known or estimated trip
5267 // count.
5268 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5269 if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
5270 return 1;
5271
5272 RegisterUsage R = calculateRegisterUsage({VF})[0];
5273   // We divide by these counts below, so make sure each of them is at least
5274   // one, i.e. assume at least one instruction uses at least one register.
5275 for (auto& pair : R.MaxLocalUsers) {
5276 pair.second = std::max(pair.second, 1U);
5277 }
5278
5279 // We calculate the interleave count using the following formula.
5280 // Subtract the number of loop invariants from the number of available
5281 // registers. These registers are used by all of the interleaved instances.
5282 // Next, divide the remaining registers by the number of registers that is
5283 // required by the loop, in order to estimate how many parallel instances
5284 // fit without causing spills. All of this is rounded down if necessary to be
5285 // a power of two. We want power of two interleave count to simplify any
5286 // addressing operations or alignment considerations.
5287 // We also want power of two interleave counts to ensure that the induction
5288   // variable of the vector loop wraps to zero when the tail is folded by
5289   // masking; this currently happens when optimizing for size, in which case 1 is returned above.
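  // Illustrative example (values not from the source): with 32 registers in a
  // class, 2 of them tied up by loop-invariant values and 6 live values per
  // iteration, IC = PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4.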
5290 unsigned IC = UINT_MAX;
5291
5292 for (auto& pair : R.MaxLocalUsers) {
5293 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5294 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5295 << " registers of "
5296 << TTI.getRegisterClassName(pair.first) << " register class\n");
5297 if (VF == 1) {
5298 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5299 TargetNumRegisters = ForceTargetNumScalarRegs;
5300 } else {
5301 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5302 TargetNumRegisters = ForceTargetNumVectorRegs;
5303 }
5304 unsigned MaxLocalUsers = pair.second;
5305 unsigned LoopInvariantRegs = 0;
5306 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5307 LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5308
5309 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5310 // Don't count the induction variable as interleaved.
5311 if (EnableIndVarRegisterHeur) {
5312 TmpIC =
5313 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5314 std::max(1U, (MaxLocalUsers - 1)));
5315 }
5316
5317 IC = std::min(IC, TmpIC);
5318 }
5319
5320 // Clamp the interleave ranges to reasonable counts.
5321 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5322
5323 // Check if the user has overridden the max.
5324 if (VF == 1) {
5325 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5326 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5327 } else {
5328 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5329 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5330 }
5331
5332   // If the trip count is a known or estimated compile-time constant, limit
5333   // the interleave count to at most the trip count divided by VF.
5334 if (BestKnownTC) {
5335 MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
5336 }
5337
5338 // If we did not calculate the cost for VF (because the user selected the VF)
5339 // then we calculate the cost of VF here.
5340 if (LoopCost == 0)
5341 LoopCost = expectedCost(VF).first;
5342
5343 assert(LoopCost && "Non-zero loop cost expected");
5344
5345   // Clamp the calculated IC to be between 1 and the max interleave count
5346   // that the target and trip count allow.
5347 if (IC > MaxInterleaveCount)
5348 IC = MaxInterleaveCount;
5349 else if (IC < 1)
5350 IC = 1;
5351
5352 // Interleave if we vectorized this loop and there is a reduction that could
5353 // benefit from interleaving.
5354 if (VF > 1 && !Legal->getReductionVars().empty()) {
5355 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5356 return IC;
5357 }
5358
5359 // Note that if we've already vectorized the loop we will have done the
5360 // runtime check and so interleaving won't require further checks.
5361 bool InterleavingRequiresRuntimePointerCheck =
5362 (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5363
5364 // We want to interleave small loops in order to reduce the loop overhead and
5365 // potentially expose ILP opportunities.
5366 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5367 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5368 // We assume that the cost overhead is 1 and we use the cost model
5369 // to estimate the cost of the loop and interleave until the cost of the
5370 // loop overhead is about 5% of the cost of the loop.
5371 unsigned SmallIC =
5372 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
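    // For example (illustrative costs): if SmallLoopCost were 20 and the loop
    // cost 3, SmallIC = min(IC, PowerOf2Floor(20 / 3)) = min(IC, 4).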
5373
5374 // Interleave until store/load ports (estimated by max interleave count) are
5375 // saturated.
5376 unsigned NumStores = Legal->getNumStores();
5377 unsigned NumLoads = Legal->getNumLoads();
5378 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5379 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5380
5381 // If we have a scalar reduction (vector reductions are already dealt with
5382 // by this point), we can increase the critical path length if the loop
5383     // we're interleaving is inside another loop. Limit it, by default, to 2 so
5384     // that the critical path only gets increased by one reduction operation.
5385 if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) {
5386 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5387 SmallIC = std::min(SmallIC, F);
5388 StoresIC = std::min(StoresIC, F);
5389 LoadsIC = std::min(LoadsIC, F);
5390 }
5391
5392 if (EnableLoadStoreRuntimeInterleave &&
5393 std::max(StoresIC, LoadsIC) > SmallIC) {
5394 LLVM_DEBUG(
5395 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5396 return std::max(StoresIC, LoadsIC);
5397 }
5398
5399 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5400 return SmallIC;
5401 }
5402
5403 // Interleave if this is a large loop (small loops are already dealt with by
5404 // this point) that could benefit from interleaving.
5405 bool HasReductions = !Legal->getReductionVars().empty();
5406 if (TTI.enableAggressiveInterleaving(HasReductions)) {
5407 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5408 return IC;
5409 }
5410
5411 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5412 return 1;
5413 }
5414
5415 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5416 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5417 // This function calculates the register usage by measuring the highest number
5418 // of values that are alive at a single location. Obviously, this is a very
5419   // rough estimation. We scan the loop in topological order and assign a
5420   // number to each instruction. We use RPO to ensure that defs are
5421 // met before their users. We assume that each instruction that has in-loop
5422 // users starts an interval. We record every time that an in-loop value is
5423 // used, so we have a list of the first and last occurrences of each
5424 // instruction. Next, we transpose this data structure into a multi map that
5425 // holds the list of intervals that *end* at a specific location. This multi
5426 // map allows us to perform a linear search. We scan the instructions linearly
5427 // and record each time that a new interval starts, by placing it in a set.
5428 // If we find this value in the multi-map then we remove it from the set.
5429 // The max register usage is the maximum size of the set.
5430 // We also search for instructions that are defined outside the loop, but are
5431 // used inside the loop. We need this number separately from the max-interval
5432 // usage number because when we unroll, loop-invariant values do not take
5433   // more registers.
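  // Illustrative example (not from the source): for a body
  //   %a = load %p ; %b = load %q ; %c = add %a, %b ; store %c
  // the values %a and %b are both live until the add, so the peak number of
  // simultaneously open intervals, and hence the estimated usage, is two.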
5434 LoopBlocksDFS DFS(TheLoop);
5435 DFS.perform(LI);
5436
5437 RegisterUsage RU;
5438
5439 // Each 'key' in the map opens a new interval. The values
5440 // of the map are the index of the 'last seen' usage of the
5441 // instruction that is the key.
5442 using IntervalMap = DenseMap<Instruction *, unsigned>;
5443
5444 // Maps instruction to its index.
5445 SmallVector<Instruction *, 64> IdxToInstr;
5446 // Marks the end of each interval.
5447 IntervalMap EndPoint;
5448   // Saves the set of instructions that are used in the loop.
5449 SmallPtrSet<Instruction *, 8> Ends;
5450 // Saves the list of values that are used in the loop but are
5451 // defined outside the loop, such as arguments and constants.
5452 SmallPtrSet<Value *, 8> LoopInvariants;
5453
5454 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5455 for (Instruction &I : BB->instructionsWithoutDebug()) {
5456 IdxToInstr.push_back(&I);
5457
5458 // Save the end location of each USE.
5459 for (Value *U : I.operands()) {
5460 auto *Instr = dyn_cast<Instruction>(U);
5461
5462 // Ignore non-instruction values such as arguments, constants, etc.
5463 if (!Instr)
5464 continue;
5465
5466 // If this instruction is outside the loop then record it and continue.
5467 if (!TheLoop->contains(Instr)) {
5468 LoopInvariants.insert(Instr);
5469 continue;
5470 }
5471
5472 // Overwrite previous end points.
5473 EndPoint[Instr] = IdxToInstr.size();
5474 Ends.insert(Instr);
5475 }
5476 }
5477 }
5478
5479 // Saves the list of intervals that end with the index in 'key'.
5480 using InstrList = SmallVector<Instruction *, 2>;
5481 DenseMap<unsigned, InstrList> TransposeEnds;
5482
5483 // Transpose the EndPoints to a list of values that end at each index.
5484 for (auto &Interval : EndPoint)
5485 TransposeEnds[Interval.second].push_back(Interval.first);
5486
5487 SmallPtrSet<Instruction *, 8> OpenIntervals;
5488
5489 // Get the size of the widest register.
5490 unsigned MaxSafeDepDist = -1U;
5491 if (Legal->getMaxSafeDepDistBytes() != -1U)
5492 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5493 unsigned WidestRegister =
5494 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5495 const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5496
5497 SmallVector<RegisterUsage, 8> RUs(VFs.size());
5498 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5499
5500 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5501
5502 // A lambda that gets the register usage for the given type and VF.
5503 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5504 if (Ty->isTokenTy())
5505 return 0U;
5506 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5507 return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5508 };
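  // For example (illustrative): with a 128-bit widest register, an i32 value
  // at VF = 8 occupies max(1, 8 * 32 / 128) = 2 registers, while at VF = 4 it
  // fits in a single register.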
5509
5510 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5511 Instruction *I = IdxToInstr[i];
5512
5513 // Remove all of the instructions that end at this location.
5514 InstrList &List = TransposeEnds[i];
5515 for (Instruction *ToRemove : List)
5516 OpenIntervals.erase(ToRemove);
5517
5518 // Ignore instructions that are never used within the loop.
5519 if (!Ends.count(I))
5520 continue;
5521
5522 // Skip ignored values.
5523 if (ValuesToIgnore.count(I))
5524 continue;
5525
5526 // For each VF find the maximum usage of registers.
5527 for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5528 // Count the number of live intervals.
5529 SmallMapVector<unsigned, unsigned, 4> RegUsage;
5530
5531 if (VFs[j] == 1) {
5532 for (auto Inst : OpenIntervals) {
5533 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5534 if (RegUsage.find(ClassID) == RegUsage.end())
5535 RegUsage[ClassID] = 1;
5536 else
5537 RegUsage[ClassID] += 1;
5538 }
5539 } else {
5540 collectUniformsAndScalars(VFs[j]);
5541 for (auto Inst : OpenIntervals) {
5542 // Skip ignored values for VF > 1.
5543 if (VecValuesToIgnore.count(Inst))
5544 continue;
5545 if (isScalarAfterVectorization(Inst, VFs[j])) {
5546 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5547 if (RegUsage.find(ClassID) == RegUsage.end())
5548 RegUsage[ClassID] = 1;
5549 else
5550 RegUsage[ClassID] += 1;
5551 } else {
5552 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
5553 if (RegUsage.find(ClassID) == RegUsage.end())
5554 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
5555 else
5556 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5557 }
5558 }
5559 }
5560
5561 for (auto& pair : RegUsage) {
5562 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
5563 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
5564 else
5565 MaxUsages[j][pair.first] = pair.second;
5566 }
5567 }
5568
5569 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5570 << OpenIntervals.size() << '\n');
5571
5572 // Add the current instruction to the list of open intervals.
5573 OpenIntervals.insert(I);
5574 }
5575
5576 for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5577 SmallMapVector<unsigned, unsigned, 4> Invariant;
5578
5579 for (auto Inst : LoopInvariants) {
5580 unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
5581 unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType());
5582 if (Invariant.find(ClassID) == Invariant.end())
5583 Invariant[ClassID] = Usage;
5584 else
5585 Invariant[ClassID] += Usage;
5586 }
5587
5588 LLVM_DEBUG({
5589 dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
5590 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
5591 << " item\n";
5592 for (const auto &pair : MaxUsages[i]) {
5593 dbgs() << "LV(REG): RegisterClass: "
5594 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5595 << " registers\n";
5596 }
5597 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
5598 << " item\n";
5599 for (const auto &pair : Invariant) {
5600 dbgs() << "LV(REG): RegisterClass: "
5601 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5602 << " registers\n";
5603 }
5604 });
5605
5606 RU.LoopInvariantRegs = Invariant;
5607 RU.MaxLocalUsers = MaxUsages[i];
5608 RUs[i] = RU;
5609 }
5610
5611 return RUs;
5612 }
5613
5614 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
5615 // TODO: Cost model for emulated masked load/store is completely
5616 // broken. This hack guides the cost model to use an artificially
5617 // high enough value to practically disable vectorization with such
5618 // operations, except where previously deployed legality hack allowed
5619 // using very low cost values. This is to avoid regressions coming simply
5620 // from moving "masked load/store" check from legality to cost model.
5621 // Masked Load/Gather emulation was previously never allowed.
5622 // Limited number of Masked Store/Scatter emulation was allowed.
5623 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5624 return isa<LoadInst>(I) ||
5625 (isa<StoreInst>(I) &&
5626 NumPredStores > NumberOfStoresToPredicate);
5627 }
5628
5629 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5630 // If we aren't vectorizing the loop, or if we've already collected the
5631 // instructions to scalarize, there's nothing to do. Collection may already
5632 // have occurred if we have a user-selected VF and are now computing the
5633 // expected cost for interleaving.
5634 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5635 return;
5636
5637   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5638 // not profitable to scalarize any instructions, the presence of VF in the
5639 // map will indicate that we've analyzed it already.
5640 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5641
5642 // Find all the instructions that are scalar with predication in the loop and
5643 // determine if it would be better to not if-convert the blocks they are in.
5644 // If so, we also record the instructions to scalarize.
5645 for (BasicBlock *BB : TheLoop->blocks()) {
5646 if (!blockNeedsPredication(BB))
5647 continue;
5648 for (Instruction &I : *BB)
5649 if (isScalarWithPredication(&I)) {
5650 ScalarCostsTy ScalarCosts;
5651 // Do not apply discount logic if hacked cost is needed
5652 // for emulated masked memrefs.
5653 if (!useEmulatedMaskMemRefHack(&I) &&
5654 computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5655 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5656 // Remember that BB will remain after vectorization.
5657 PredicatedBBsAfterVectorization.insert(BB);
5658 }
5659 }
5660 }
5661
5662 int LoopVectorizationCostModel::computePredInstDiscount(
5663 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5664 unsigned VF) {
5665 assert(!isUniformAfterVectorization(PredInst, VF) &&
5666 "Instruction marked uniform-after-vectorization will be predicated");
5667
5668 // Initialize the discount to zero, meaning that the scalar version and the
5669 // vector version cost the same.
5670 int Discount = 0;
5671
5672 // Holds instructions to analyze. The instructions we visit are mapped in
5673 // ScalarCosts. Those instructions are the ones that would be scalarized if
5674 // we find that the scalar version costs less.
5675 SmallVector<Instruction *, 8> Worklist;
5676
5677 // Returns true if the given instruction can be scalarized.
5678 auto canBeScalarized = [&](Instruction *I) -> bool {
5679 // We only attempt to scalarize instructions forming a single-use chain
5680 // from the original predicated block that would otherwise be vectorized.
5681 // Although not strictly necessary, we give up on instructions we know will
5682 // already be scalar to avoid traversing chains that are unlikely to be
5683 // beneficial.
5684 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5685 isScalarAfterVectorization(I, VF))
5686 return false;
5687
5688 // If the instruction is scalar with predication, it will be analyzed
5689 // separately. We ignore it within the context of PredInst.
5690 if (isScalarWithPredication(I))
5691 return false;
5692
5693 // If any of the instruction's operands are uniform after vectorization,
5694 // the instruction cannot be scalarized. This prevents, for example, a
5695 // masked load from being scalarized.
5696 //
5697 // We assume we will only emit a value for lane zero of an instruction
5698 // marked uniform after vectorization, rather than VF identical values.
5699 // Thus, if we scalarize an instruction that uses a uniform, we would
5700 // create uses of values corresponding to the lanes we aren't emitting code
5701 // for. This behavior can be changed by allowing getScalarValue to clone
5702 // the lane zero values for uniforms rather than asserting.
5703 for (Use &U : I->operands())
5704 if (auto *J = dyn_cast<Instruction>(U.get()))
5705 if (isUniformAfterVectorization(J, VF))
5706 return false;
5707
5708 // Otherwise, we can scalarize the instruction.
5709 return true;
5710 };
5711
5712 // Compute the expected cost discount from scalarizing the entire expression
5713 // feeding the predicated instruction. We currently only consider expressions
5714 // that are single-use instruction chains.
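  // Illustrative example (made-up costs): if the if-converted vector form of a
  // single-use chain costs 12 while its scalarized, probability-scaled form
  // costs 8, the chain accumulates a discount of 4, and the caller will record
  // its instructions in ScalarCosts and scalarize them.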
5715 Worklist.push_back(PredInst);
5716 while (!Worklist.empty()) {
5717 Instruction *I = Worklist.pop_back_val();
5718
5719 // If we've already analyzed the instruction, there's nothing to do.
5720 if (ScalarCosts.find(I) != ScalarCosts.end())
5721 continue;
5722
5723 // Compute the cost of the vector instruction. Note that this cost already
5724 // includes the scalarization overhead of the predicated instruction.
5725 unsigned VectorCost = getInstructionCost(I, VF).first;
5726
5727 // Compute the cost of the scalarized instruction. This cost is the cost of
5728 // the instruction as if it wasn't if-converted and instead remained in the
5729 // predicated block. We will scale this cost by block probability after
5730 // computing the scalarization overhead.
5731 unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5732
5733 // Compute the scalarization overhead of needed insertelement instructions
5734 // and phi nodes.
5735 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5736 ScalarCost += TTI.getScalarizationOverhead(
5737 cast<VectorType>(ToVectorTy(I->getType(), VF)),
5738 APInt::getAllOnesValue(VF), true, false);
5739 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI,
5740 TTI::TCK_RecipThroughput);
5741 }
5742
5743 // Compute the scalarization overhead of needed extractelement
5744 // instructions. For each of the instruction's operands, if the operand can
5745 // be scalarized, add it to the worklist; otherwise, account for the
5746 // overhead.
5747 for (Use &U : I->operands())
5748 if (auto *J = dyn_cast<Instruction>(U.get())) {
5749 assert(VectorType::isValidElementType(J->getType()) &&
5750 "Instruction has non-scalar type");
5751 if (canBeScalarized(J))
5752 Worklist.push_back(J);
5753 else if (needsExtract(J, VF))
5754 ScalarCost += TTI.getScalarizationOverhead(
5755 cast<VectorType>(ToVectorTy(J->getType(), VF)),
5756 APInt::getAllOnesValue(VF), false, true);
5757 }
5758
5759 // Scale the total scalar cost by block probability.
5760 ScalarCost /= getReciprocalPredBlockProb();
5761
5762 // Compute the discount. A non-negative discount means the vector version
5763 // of the instruction costs more, and scalarizing would be beneficial.
5764 Discount += VectorCost - ScalarCost;
5765 ScalarCosts[I] = ScalarCost;
5766 }
5767
5768 return Discount;
5769 }
5770
5771 LoopVectorizationCostModel::VectorizationCostTy
5772 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5773 VectorizationCostTy Cost;
5774
5775 // For each block.
5776 for (BasicBlock *BB : TheLoop->blocks()) {
5777 VectorizationCostTy BlockCost;
5778
5779 // For each instruction in the old loop.
5780 for (Instruction &I : BB->instructionsWithoutDebug()) {
5781 // Skip ignored values.
5782 if (ValuesToIgnore.count(&I) || (VF > 1 && VecValuesToIgnore.count(&I)))
5783 continue;
5784
5785 VectorizationCostTy C = getInstructionCost(&I, VF);
5786
5787 // Check if we should override the cost.
5788 if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5789 C.first = ForceTargetInstructionCost;
5790
5791 BlockCost.first += C.first;
5792 BlockCost.second |= C.second;
5793 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5794 << " for VF " << VF << " For instruction: " << I
5795 << '\n');
5796 }
5797
5798 // If we are vectorizing a predicated block, it will have been
5799 // if-converted. This means that the block's instructions (aside from
5800 // stores and instructions that may divide by zero) will now be
5801 // unconditionally executed. For the scalar case, we may not always execute
5802 // the predicated block. Thus, scale the block's cost by the probability of
5803 // executing it.
5804 if (VF == 1 && blockNeedsPredication(BB))
5805 BlockCost.first /= getReciprocalPredBlockProb();
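    // For instance (illustrative): with a reciprocal block probability of 2
    // (an assumed 50% chance of executing the predicated block), a scalar
    // block cost of 10 contributes only 5 to the total loop cost.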
5806
5807 Cost.first += BlockCost.first;
5808 Cost.second |= BlockCost.second;
5809 }
5810
5811 return Cost;
5812 }
5813
5814 /// Gets the address access SCEV after verifying that the access pattern is
5815 /// loop invariant except for the induction variable dependence.
5816 ///
5817 /// This SCEV can be sent to the Target in order to estimate the address
5818 /// calculation cost.
5819 static const SCEV *getAddressAccessSCEV(
5820 Value *Ptr,
5821 LoopVectorizationLegality *Legal,
5822 PredicatedScalarEvolution &PSE,
5823 const Loop *TheLoop) {
5824
5825 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5826 if (!Gep)
5827 return nullptr;
5828
5829 // We are looking for a gep with all loop invariant indices except for one
5830 // which should be an induction variable.
5831 auto SE = PSE.getSE();
5832 unsigned NumOperands = Gep->getNumOperands();
5833 for (unsigned i = 1; i < NumOperands; ++i) {
5834 Value *Opd = Gep->getOperand(i);
5835 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5836 !Legal->isInductionVariable(Opd))
5837 return nullptr;
5838 }
5839
5840 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
5841 return PSE.getSCEV(Ptr);
5842 }
5843
5844 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5845 return Legal->hasStride(I->getOperand(0)) ||
5846 Legal->hasStride(I->getOperand(1));
5847 }
5848
5849 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5850 unsigned VF) {
5851 assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
5852 Type *ValTy = getMemInstValueType(I);
5853 auto SE = PSE.getSE();
5854
5855 unsigned AS = getLoadStoreAddressSpace(I);
5856 Value *Ptr = getLoadStorePointerOperand(I);
5857 Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5858
5859 // Figure out whether the access is strided and get the stride value
5860   // if it's known at compile time.
5861 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5862
5863 // Get the cost of the scalar memory instruction and address computation.
5864 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5865
5866 // Don't pass *I here, since it is scalar but will actually be part of a
5867 // vectorized loop where the user of it is a vectorized instruction.
5868 const Align Alignment = getLoadStoreAlignment(I);
5869 Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5870 Alignment, AS,
5871 TTI::TCK_RecipThroughput);
5872
5873 // Get the overhead of the extractelement and insertelement instructions
5874 // we might create due to scalarization.
5875 Cost += getScalarizationOverhead(I, VF);
5876
5877 // If we have a predicated store, it may not be executed for each vector
5878 // lane. Scale the cost by the probability of executing the predicated
5879 // block.
5880 if (isPredicatedInst(I)) {
5881 Cost /= getReciprocalPredBlockProb();
5882
5883 if (useEmulatedMaskMemRefHack(I))
5884 // Artificially setting to a high enough value to practically disable
5885 // vectorization with such operations.
5886 Cost = 3000000;
5887 }
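  // Rough illustration (made-up costs): at VF = 4 with an address-computation
  // cost of 1 and a scalar memory-op cost of 1 per lane, plus an extract/insert
  // overhead of 8, Cost = 4 * 1 + 4 * 1 + 8 = 16; a predicated access would
  // then be scaled down to 8 by the block-probability division above.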
5888
5889 return Cost;
5890 }
5891
5892 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5893 unsigned VF) {
5894 Type *ValTy = getMemInstValueType(I);
5895 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5896 Value *Ptr = getLoadStorePointerOperand(I);
5897 unsigned AS = getLoadStoreAddressSpace(I);
5898 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5899 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5900
5901 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5902 "Stride should be 1 or -1 for consecutive memory access");
5903 const Align Alignment = getLoadStoreAlignment(I);
5904 unsigned Cost = 0;
5905 if (Legal->isMaskRequired(I))
5906 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5907 CostKind);
5908 else
5909 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5910 CostKind, I);
5911
5912 bool Reverse = ConsecutiveStride < 0;
5913 if (Reverse)
5914 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5915 return Cost;
5916 }
5917
5918 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5919 unsigned VF) {
5920 Type *ValTy = getMemInstValueType(I);
5921 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5922 const Align Alignment = getLoadStoreAlignment(I);
5923 unsigned AS = getLoadStoreAddressSpace(I);
5924 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5925 if (isa<LoadInst>(I)) {
5926 return TTI.getAddressComputationCost(ValTy) +
5927 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5928 CostKind) +
5929 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5930 }
5931 StoreInst *SI = cast<StoreInst>(I);
5932
5933 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
5934 return TTI.getAddressComputationCost(ValTy) +
5935 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
5936 CostKind) +
5937 (isLoopInvariantStoreValue
5938 ? 0
5939 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
5940 VF - 1));
5941 }
5942
5943 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5944 unsigned VF) {
5945 Type *ValTy = getMemInstValueType(I);
5946 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5947 const Align Alignment = getLoadStoreAlignment(I);
5948 const Value *Ptr = getLoadStorePointerOperand(I);
5949
5950 return TTI.getAddressComputationCost(VectorTy) +
5951 TTI.getGatherScatterOpCost(
5952 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
5953 TargetTransformInfo::TCK_RecipThroughput, I);
5954 }
5955
5956 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5957 unsigned VF) {
5958 Type *ValTy = getMemInstValueType(I);
5959 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5960 unsigned AS = getLoadStoreAddressSpace(I);
5961
5962 auto Group = getInterleavedAccessGroup(I);
5963 assert(Group && "Fail to get an interleaved access group.");
5964
5965 unsigned InterleaveFactor = Group->getFactor();
5966 auto *WideVecTy = FixedVectorType::get(ValTy, VF * InterleaveFactor);
5967
5968 // Holds the indices of existing members in an interleaved load group.
5969 // An interleaved store group doesn't need this as it doesn't allow gaps.
5970 SmallVector<unsigned, 4> Indices;
5971 if (isa<LoadInst>(I)) {
5972 for (unsigned i = 0; i < InterleaveFactor; i++)
5973 if (Group->getMember(i))
5974 Indices.push_back(i);
5975 }
5976
5977 // Calculate the cost of the whole interleaved group.
5978 bool UseMaskForGaps =
5979 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5980 unsigned Cost = TTI.getInterleavedMemoryOpCost(
5981 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
5982 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
5983
5984 if (Group->isReverse()) {
5985 // TODO: Add support for reversed masked interleaved access.
5986 assert(!Legal->isMaskRequired(I) &&
5987 "Reverse masked interleaved access not supported.");
5988 Cost += Group->getNumMembers() *
5989 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5990 }
5991 return Cost;
5992 }
5993
5994 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5995 unsigned VF) {
5996 // Calculate scalar cost only. Vectorization cost should be ready at this
5997 // moment.
5998 if (VF == 1) {
5999 Type *ValTy = getMemInstValueType(I);
6000 const Align Alignment = getLoadStoreAlignment(I);
6001 unsigned AS = getLoadStoreAddressSpace(I);
6002
6003 return TTI.getAddressComputationCost(ValTy) +
6004 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6005 TTI::TCK_RecipThroughput, I);
6006 }
6007 return getWideningCost(I, VF);
6008 }
6009
6010 LoopVectorizationCostModel::VectorizationCostTy
6011 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
6012 // If we know that this instruction will remain uniform, check the cost of
6013 // the scalar version.
6014 if (isUniformAfterVectorization(I, VF))
6015 VF = 1;
6016
6017 if (VF > 1 && isProfitableToScalarize(I, VF))
6018 return VectorizationCostTy(InstsToScalarize[VF][I], false);
6019
6020 // Forced scalars do not have any scalarization overhead.
6021 auto ForcedScalar = ForcedScalars.find(VF);
6022 if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
6023 auto InstSet = ForcedScalar->second;
6024 if (InstSet.count(I))
6025 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
6026 }
6027
6028 Type *VectorTy;
6029 unsigned C = getInstructionCost(I, VF, VectorTy);
6030
6031 bool TypeNotScalarized =
6032 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
6033 return VectorizationCostTy(C, TypeNotScalarized);
6034 }
6035
6036 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6037 unsigned VF) {
6038
6039 if (VF == 1)
6040 return 0;
6041
6042 unsigned Cost = 0;
6043 Type *RetTy = ToVectorTy(I->getType(), VF);
6044 if (!RetTy->isVoidTy() &&
6045 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6046 Cost += TTI.getScalarizationOverhead(
6047 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF), true, false);
6048
6049 // Some targets keep addresses scalar.
6050 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6051 return Cost;
6052
6053 // Some targets support efficient element stores.
6054 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6055 return Cost;
6056
6057 // Collect operands to consider.
6058 CallInst *CI = dyn_cast<CallInst>(I);
6059 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6060
6061 // Skip operands that do not require extraction/scalarization and do not incur
6062 // any overhead.
6063 return Cost + TTI.getOperandsScalarizationOverhead(
6064 filterExtractingOperands(Ops, VF), VF);
6065 }
6066
6067 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
6068 if (VF == 1)
6069 return;
6070 NumPredStores = 0;
6071 for (BasicBlock *BB : TheLoop->blocks()) {
6072 // For each instruction in the old loop.
6073 for (Instruction &I : *BB) {
6074 Value *Ptr = getLoadStorePointerOperand(&I);
6075 if (!Ptr)
6076 continue;
6077
6078 // TODO: We should generate better code and update the cost model for
6079 // predicated uniform stores. Today they are treated as any other
6080 // predicated store (see added test cases in
6081 // invariant-store-vectorization.ll).
6082 if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6083 NumPredStores++;
6084
6085 if (Legal->isUniform(Ptr) &&
6086 // Conditional loads and stores should be scalarized and predicated.
6087 // isScalarWithPredication cannot be used here since masked
6088 // gather/scatters are not considered scalar with predication.
6089 !Legal->blockNeedsPredication(I.getParent())) {
6090 // TODO: Avoid replicating loads and stores instead of
6091 // relying on instcombine to remove them.
6092 // Load: Scalar load + broadcast
6093 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6094 unsigned Cost = getUniformMemOpCost(&I, VF);
6095 setWideningDecision(&I, VF, CM_Scalarize, Cost);
6096 continue;
6097 }
6098
6099 // We assume that widening is the best solution when possible.
6100 if (memoryInstructionCanBeWidened(&I, VF)) {
6101 unsigned Cost = getConsecutiveMemOpCost(&I, VF);
6102 int ConsecutiveStride =
6103 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6104 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6105 "Expected consecutive stride.");
6106 InstWidening Decision =
6107 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6108 setWideningDecision(&I, VF, Decision, Cost);
6109 continue;
6110 }
6111
6112 // Choose between Interleaving, Gather/Scatter or Scalarization.
6113 unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
6114 unsigned NumAccesses = 1;
6115 if (isAccessInterleaved(&I)) {
6116 auto Group = getInterleavedAccessGroup(&I);
6117 assert(Group && "Fail to get an interleaved access group.");
6118
6119 // Make one decision for the whole group.
6120 if (getWideningDecision(&I, VF) != CM_Unknown)
6121 continue;
6122
6123 NumAccesses = Group->getNumMembers();
6124 if (interleavedAccessCanBeWidened(&I, VF))
6125 InterleaveCost = getInterleaveGroupCost(&I, VF);
6126 }
6127
6128 unsigned GatherScatterCost =
6129 isLegalGatherOrScatter(&I)
6130 ? getGatherScatterCost(&I, VF) * NumAccesses
6131 : std::numeric_limits<unsigned>::max();
6132
6133 unsigned ScalarizationCost =
6134 getMemInstScalarizationCost(&I, VF) * NumAccesses;
6135
6136 // Choose better solution for the current VF,
6137 // write down this decision and use it during vectorization.
6138 unsigned Cost;
6139 InstWidening Decision;
6140 if (InterleaveCost <= GatherScatterCost &&
6141 InterleaveCost < ScalarizationCost) {
6142 Decision = CM_Interleave;
6143 Cost = InterleaveCost;
6144 } else if (GatherScatterCost < ScalarizationCost) {
6145 Decision = CM_GatherScatter;
6146 Cost = GatherScatterCost;
6147 } else {
6148 Decision = CM_Scalarize;
6149 Cost = ScalarizationCost;
6150 }
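      // For illustration (made-up costs): with InterleaveCost = 6,
      // GatherScatterCost = 10 and ScalarizationCost = 8, interleaving is
      // chosen; if interleaving were not possible, scalarization (8) would
      // still be preferred over gather/scatter (10).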
6151       // If the instruction belongs to an interleave group, the whole group
6152 // receives the same decision. The whole group receives the cost, but
6153 // the cost will actually be assigned to one instruction.
6154 if (auto Group = getInterleavedAccessGroup(&I))
6155 setWideningDecision(Group, VF, Decision, Cost);
6156 else
6157 setWideningDecision(&I, VF, Decision, Cost);
6158 }
6159 }
6160
6161 // Make sure that any load of address and any other address computation
6162 // remains scalar unless there is gather/scatter support. This avoids
6163 // inevitable extracts into address registers, and also has the benefit of
6164 // activating LSR more, since that pass can't optimize vectorized
6165 // addresses.
6166 if (TTI.prefersVectorizedAddressing())
6167 return;
6168
6169 // Start with all scalar pointer uses.
6170 SmallPtrSet<Instruction *, 8> AddrDefs;
6171 for (BasicBlock *BB : TheLoop->blocks())
6172 for (Instruction &I : *BB) {
6173 Instruction *PtrDef =
6174 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6175 if (PtrDef && TheLoop->contains(PtrDef) &&
6176 getWideningDecision(&I, VF) != CM_GatherScatter)
6177 AddrDefs.insert(PtrDef);
6178 }
6179
6180 // Add all instructions used to generate the addresses.
6181 SmallVector<Instruction *, 4> Worklist;
6182 for (auto *I : AddrDefs)
6183 Worklist.push_back(I);
6184 while (!Worklist.empty()) {
6185 Instruction *I = Worklist.pop_back_val();
6186 for (auto &Op : I->operands())
6187 if (auto *InstOp = dyn_cast<Instruction>(Op))
6188 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6189 AddrDefs.insert(InstOp).second)
6190 Worklist.push_back(InstOp);
6191 }
6192
6193 for (auto *I : AddrDefs) {
6194 if (isa<LoadInst>(I)) {
6195       // Setting the desired widening decision should ideally be handled by
6196       // cost functions, but since this involves the task of finding out
6197 // if the loaded register is involved in an address computation, it is
6198 // instead changed here when we know this is the case.
6199 InstWidening Decision = getWideningDecision(I, VF);
6200 if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6201 // Scalarize a widened load of address.
6202 setWideningDecision(I, VF, CM_Scalarize,
6203 (VF * getMemoryInstructionCost(I, 1)));
6204 else if (auto Group = getInterleavedAccessGroup(I)) {
6205 // Scalarize an interleave group of address loads.
6206 for (unsigned I = 0; I < Group->getFactor(); ++I) {
6207 if (Instruction *Member = Group->getMember(I))
6208 setWideningDecision(Member, VF, CM_Scalarize,
6209 (VF * getMemoryInstructionCost(Member, 1)));
6210 }
6211 }
6212 } else
6213 // Make sure I gets scalarized and a cost estimate without
6214 // scalarization overhead.
6215 ForcedScalars[VF].insert(I);
6216 }
6217 }
6218
6219 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6220 unsigned VF,
6221 Type *&VectorTy) {
6222 Type *RetTy = I->getType();
6223 if (canTruncateToMinimalBitwidth(I, VF))
6224 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6225 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6226 auto SE = PSE.getSE();
6227 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6228
6229 // TODO: We need to estimate the cost of intrinsic calls.
6230 switch (I->getOpcode()) {
6231 case Instruction::GetElementPtr:
6232 // We mark this instruction as zero-cost because the cost of GEPs in
6233 // vectorized code depends on whether the corresponding memory instruction
6234 // is scalarized or not. Therefore, we handle GEPs with the memory
6235 // instruction cost.
6236 return 0;
6237 case Instruction::Br: {
6238 // In cases of scalarized and predicated instructions, there will be VF
6239 // predicated blocks in the vectorized loop. Each branch around these
6240     // blocks also requires an extract of its vector compare i1 element.
6241 bool ScalarPredicatedBB = false;
6242 BranchInst *BI = cast<BranchInst>(I);
6243 if (VF > 1 && BI->isConditional() &&
6244 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
6245 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
6246 ScalarPredicatedBB = true;
6247
6248 if (ScalarPredicatedBB) {
6249 // Return cost for branches around scalarized and predicated blocks.
6250 auto *Vec_i1Ty =
6251 FixedVectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6252 return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF),
6253 false, true) +
6254 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF));
6255 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
6256 // The back-edge branch will remain, as will all scalar branches.
6257 return TTI.getCFInstrCost(Instruction::Br, CostKind);
6258 else
6259 // This branch will be eliminated by if-conversion.
6260 return 0;
6261 // Note: We currently assume zero cost for an unconditional branch inside
6262 // a predicated block since it will become a fall-through, although we
6263 // may decide in the future to call TTI for all branches.
6264 }
6265 case Instruction::PHI: {
6266 auto *Phi = cast<PHINode>(I);
6267
6268 // First-order recurrences are replaced by vector shuffles inside the loop.
6269 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
6270 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
6271 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6272 cast<VectorType>(VectorTy), VF - 1,
6273 FixedVectorType::get(RetTy, 1));
6274
6275 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6276 // converted into select instructions. We require N - 1 selects per phi
6277 // node, where N is the number of incoming values.
6278 if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
6279 return (Phi->getNumIncomingValues() - 1) *
6280 TTI.getCmpSelInstrCost(
6281 Instruction::Select, ToVectorTy(Phi->getType(), VF),
6282 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6283 CostKind);
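    // For example, a phi merging three incoming values is costed as two
    // vector selects of the phi's (vectorized) type.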
6284
6285 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6286 }
6287 case Instruction::UDiv:
6288 case Instruction::SDiv:
6289 case Instruction::URem:
6290 case Instruction::SRem:
6291 // If we have a predicated instruction, it may not be executed for each
6292 // vector lane. Get the scalarization cost and scale this amount by the
6293 // probability of executing the predicated block. If the instruction is not
6294 // predicated, we fall through to the next case.
6295 if (VF > 1 && isScalarWithPredication(I)) {
6296 unsigned Cost = 0;
6297
6298 // These instructions have a non-void type, so account for the phi nodes
6299 // that we will create. This cost is likely to be zero. The phi node
6300 // cost, if any, should be scaled by the block probability because it
6301 // models a copy at the end of each predicated block.
6302 Cost += VF * TTI.getCFInstrCost(Instruction::PHI, CostKind);
6303
6304 // The cost of the non-predicated instruction.
6305 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
6306
6307 // The cost of insertelement and extractelement instructions needed for
6308 // scalarization.
6309 Cost += getScalarizationOverhead(I, VF);
6310
6311 // Scale the cost by the probability of executing the predicated blocks.
6312 // This assumes the predicated block for each vector lane is equally
6313 // likely.
6314 return Cost / getReciprocalPredBlockProb();
6315 }
6316 LLVM_FALLTHROUGH;
6317 case Instruction::Add:
6318 case Instruction::FAdd:
6319 case Instruction::Sub:
6320 case Instruction::FSub:
6321 case Instruction::Mul:
6322 case Instruction::FMul:
6323 case Instruction::FDiv:
6324 case Instruction::FRem:
6325 case Instruction::Shl:
6326 case Instruction::LShr:
6327 case Instruction::AShr:
6328 case Instruction::And:
6329 case Instruction::Or:
6330 case Instruction::Xor: {
6331 // Since we will replace the stride by 1 the multiplication should go away.
6332 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6333 return 0;
6334 // Certain instructions can be cheaper to vectorize if they have a constant
6335     // second vector operand. One example of this is shifts on x86.
6336 Value *Op2 = I->getOperand(1);
6337 TargetTransformInfo::OperandValueProperties Op2VP;
6338 TargetTransformInfo::OperandValueKind Op2VK =
6339 TTI.getOperandInfo(Op2, Op2VP);
6340 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
6341 Op2VK = TargetTransformInfo::OK_UniformValue;
6342
6343 SmallVector<const Value *, 4> Operands(I->operand_values());
6344 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6345 return N * TTI.getArithmeticInstrCost(
6346 I->getOpcode(), VectorTy, CostKind,
6347 TargetTransformInfo::OK_AnyValue,
6348 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
6349 }
6350 case Instruction::FNeg: {
6351 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6352 return N * TTI.getArithmeticInstrCost(
6353 I->getOpcode(), VectorTy, CostKind,
6354 TargetTransformInfo::OK_AnyValue,
6355 TargetTransformInfo::OK_AnyValue,
6356 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
6357 I->getOperand(0), I);
6358 }
6359 case Instruction::Select: {
6360 SelectInst *SI = cast<SelectInst>(I);
6361 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6362 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6363 Type *CondTy = SI->getCondition()->getType();
6364 if (!ScalarCond)
6365 CondTy = FixedVectorType::get(CondTy, VF);
6366
6367 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
6368 CostKind, I);
6369 }
6370 case Instruction::ICmp:
6371 case Instruction::FCmp: {
6372 Type *ValTy = I->getOperand(0)->getType();
6373 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6374 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6375 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6376 VectorTy = ToVectorTy(ValTy, VF);
6377 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, CostKind,
6378 I);
6379 }
6380 case Instruction::Store:
6381 case Instruction::Load: {
6382 unsigned Width = VF;
6383 if (Width > 1) {
6384 InstWidening Decision = getWideningDecision(I, Width);
6385 assert(Decision != CM_Unknown &&
6386 "CM decision should be taken at this point");
6387 if (Decision == CM_Scalarize)
6388 Width = 1;
6389 }
6390 VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6391 return getMemoryInstructionCost(I, VF);
6392 }
6393 case Instruction::ZExt:
6394 case Instruction::SExt:
6395 case Instruction::FPToUI:
6396 case Instruction::FPToSI:
6397 case Instruction::FPExt:
6398 case Instruction::PtrToInt:
6399 case Instruction::IntToPtr:
6400 case Instruction::SIToFP:
6401 case Instruction::UIToFP:
6402 case Instruction::Trunc:
6403 case Instruction::FPTrunc:
6404 case Instruction::BitCast: {
6405 // We optimize the truncation of induction variables having constant
6406 // integer steps. The cost of these truncations is the same as the scalar
6407 // operation.
6408 if (isOptimizableIVTruncate(I, VF)) {
6409 auto *Trunc = cast<TruncInst>(I);
6410 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6411 Trunc->getSrcTy(), CostKind, Trunc);
6412 }
6413
6414 Type *SrcScalarTy = I->getOperand(0)->getType();
6415 Type *SrcVecTy =
6416 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6417 if (canTruncateToMinimalBitwidth(I, VF)) {
6418 // This cast is going to be shrunk. This may remove the cast or it might
6419       // turn it into a slightly different cast. For example, if MinBW == 16,
6420 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6421 //
6422 // Calculate the modified src and dest types.
6423 Type *MinVecTy = VectorTy;
6424 if (I->getOpcode() == Instruction::Trunc) {
6425 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6426 VectorTy =
6427 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6428 } else if (I->getOpcode() == Instruction::ZExt ||
6429 I->getOpcode() == Instruction::SExt) {
6430 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6431 VectorTy =
6432 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6433 }
6434 }
6435
6436 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6437 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy,
6438 CostKind, I);
6439 }
6440 case Instruction::Call: {
6441 bool NeedToScalarize;
6442 CallInst *CI = cast<CallInst>(I);
6443 unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
6444 if (getVectorIntrinsicIDForCall(CI, TLI))
6445 return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
6446 return CallCost;
6447 }
6448 default:
6449 // The cost of executing VF copies of the scalar instruction. This opcode
6450 // is unknown. Assume that it is the same as 'mul'.
6451 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
6452 CostKind) +
6453 getScalarizationOverhead(I, VF);
6454 } // end of switch.
6455 }
6456
6457 char LoopVectorize::ID = 0;
6458
6459 static const char lv_name[] = "Loop Vectorization";
6460
6461 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6462 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6463 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6464 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6465 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6466 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6467 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6468 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6469 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6470 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6471 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6472 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6473 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6474 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6475 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
6476 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6477
6478 namespace llvm {
6479
6480 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
6481
6482 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6483 bool VectorizeOnlyWhenForced) {
6484 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6485 }
6486
6487 } // end namespace llvm
6488
6489 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6490 // Check if the pointer operand of a load or store instruction is
6491 // consecutive.
6492 if (auto *Ptr = getLoadStorePointerOperand(Inst))
6493 return Legal->isConsecutivePtr(Ptr);
6494 return false;
6495 }
6496
6497 void LoopVectorizationCostModel::collectValuesToIgnore() {
6498 // Ignore ephemeral values.
6499 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6500
6501 // Ignore type-promoting instructions we identified during reduction
6502 // detection.
6503 for (auto &Reduction : Legal->getReductionVars()) {
6504 RecurrenceDescriptor &RedDes = Reduction.second;
6505 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6506 VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6507 }
6508 // Ignore type-casting instructions we identified during induction
6509 // detection.
6510 for (auto &Induction : Legal->getInductionVars()) {
6511 InductionDescriptor &IndDes = Induction.second;
6512 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6513 VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6514 }
6515 }
6516
6517 // TODO: we could return a pair of values that specify the max VF and
6518 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6519 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6520 // doesn't have a cost model that can choose which plan to execute if
6521 // more than one is generated.
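// Determine a VF for the VPlan-native path by dividing the widest vector
// register width (in bits) by the widest scalar type (in bits) observed in the
// loop, as collected by the cost model.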
6522 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6523 LoopVectorizationCostModel &CM) {
6524 unsigned WidestType;
6525 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6526 return WidestVectorRegBits / WidestType;
6527 }
6528
6529 VectorizationFactor
6530 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
6531 unsigned VF = UserVF;
6532 // Outer loop handling: They may require CFG and instruction level
6533 // transformations before even evaluating whether vectorization is profitable.
6534 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6535 // the vectorization pipeline.
6536 if (!OrigLoop->empty()) {
6537 // If the user doesn't provide a vectorization factor, determine a
6538 // reasonable one.
6539 if (!UserVF) {
6540 VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM);
6541 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6542
6543 // Make sure we have a VF > 1 for stress testing.
6544 if (VPlanBuildStressTest && VF < 2) {
6545 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6546 << "overriding computed VF.\n");
6547 VF = 4;
6548 }
6549 }
6550 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6551 assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
6552 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
6553 << " to build VPlans.\n");
6554 buildVPlans(VF, VF);
6555
6556 // For VPlan build stress testing, we bail out after VPlan construction.
6557 if (VPlanBuildStressTest)
6558 return VectorizationFactor::Disabled();
6559
6560 return {VF, 0};
6561 }
6562
6563 LLVM_DEBUG(
6564 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6565 "VPlan-native path.\n");
6566 return VectorizationFactor::Disabled();
6567 }
6568
6569 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF,
6570 unsigned UserIC) {
6571 assert(OrigLoop->empty() && "Inner loop expected.");
6572 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
6573 if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
6574 return None;
6575
6576 // Invalidate interleave groups if all blocks of loop will be predicated.
6577 if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6578 !useMaskedInterleavedAccesses(*TTI)) {
6579 LLVM_DEBUG(
6580 dbgs()
6581 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6582 "which requires masked-interleaved support.\n");
6583 if (CM.InterleaveInfo.invalidateGroups())
6584 // Invalidating interleave groups also requires invalidating all decisions
6585 // based on them, which includes widening decisions and uniform and scalar
6586 // values.
6587 CM.invalidateCostModelingDecisions();
6588 }
6589
6590 if (UserVF) {
6591 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6592 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6593 // Collect the instructions (and their associated costs) that will be more
6594 // profitable to scalarize.
6595 CM.selectUserVectorizationFactor(UserVF);
6596 buildVPlansWithVPRecipes(UserVF, UserVF);
6597 LLVM_DEBUG(printPlans(dbgs()));
6598 return {{UserVF, 0}};
6599 }
6600
6601 unsigned MaxVF = MaybeMaxVF.getValue();
6602 assert(MaxVF != 0 && "MaxVF is zero.");
6603
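// Pre-compute the per-VF analyses for every candidate VF (all powers of two up
// to MaxVF) before building the VPlans that cover them.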
6604 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6605 // Collect Uniform and Scalar instructions after vectorization with VF.
6606 CM.collectUniformsAndScalars(VF);
6607
6608 // Collect the instructions (and their associated costs) that will be more
6609 // profitable to scalarize.
6610 if (VF > 1)
6611 CM.collectInstsToScalarize(VF);
6612 }
6613
6614 buildVPlansWithVPRecipes(1, MaxVF);
6615 LLVM_DEBUG(printPlans(dbgs()));
6616 if (MaxVF == 1)
6617 return VectorizationFactor::Disabled();
6618
6619 // Select the optimal vectorization factor.
6620 return CM.selectVectorizationFactor(MaxVF);
6621 }
6622
6623 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6624 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6625 << '\n');
6626 BestVF = VF;
6627 BestUF = UF;
6628
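// Drop every candidate VPlan that does not cover the chosen VF; exactly one
// plan is expected to remain.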
6629 erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6630 return !Plan->hasVF(VF);
6631 });
6632 assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
6633 }
6634
6635 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6636 DominatorTree *DT) {
6637 // Perform the actual loop transformation.
6638
6639 // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6640 VPCallbackILV CallbackILV(ILV);
6641
6642 VPTransformState State{BestVF, BestUF, LI,
6643 DT, ILV.Builder, ILV.VectorLoopValueMap,
6644 &ILV, CallbackILV};
6645 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6646 State.TripCount = ILV.getOrCreateTripCount(nullptr);
6647 State.CanonicalIV = ILV.Induction;
6648
6649 //===------------------------------------------------===//
6650 //
6651 // Notice: any optimization or new instruction that goes
6652 // into the code below should also be implemented in
6653 // the cost-model.
6654 //
6655 //===------------------------------------------------===//
6656
6657 // 2. Copy and widen instructions from the old loop into the new loop.
6658 assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6659 VPlans.front()->execute(&State);
6660
6661 // 3. Fix the vectorized code: take care of header phi's, live-outs,
6662 // predication, updating analyses.
6663 ILV.fixVectorizedLoop();
6664 }
6665
6666 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6667 SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6668 BasicBlock *Latch = OrigLoop->getLoopLatch();
6669
6670 // We create new control-flow for the vectorized loop, so the original
6671 // condition will be dead after vectorization if it's only used by the
6672 // branch.
6673 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6674 if (Cmp && Cmp->hasOneUse())
6675 DeadInstructions.insert(Cmp);
6676
6677 // We create new "steps" for induction variable updates to which the original
6678 // induction variables map. An original update instruction will be dead if
6679 // all its users except the induction variable are dead.
6680 for (auto &Induction : Legal->getInductionVars()) {
6681 PHINode *Ind = Induction.first;
6682 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6683 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6684 return U == Ind || DeadInstructions.count(cast<Instruction>(U));
6685 }))
6686 DeadInstructions.insert(IndUpdate);
6687
6688 // We record as "Dead" also the type-casting instructions we had identified
6689 // during induction analysis. We don't need any handling for them in the
6690 // vectorized loop because we have proven that, under a proper runtime
6691 // test guarding the vectorized loop, the value of the phi, and the casted
6692 // value of the phi, are the same. The last instruction in this casting chain
6693 // will get its scalar/vector/widened def from the scalar/vector/widened def
6694 // of the respective phi node. Any other casts in the induction def-use chain
6695 // have no other uses outside the phi update chain, and will be ignored.
6696 InductionDescriptor &IndDes = Induction.second;
6697 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6698 DeadInstructions.insert(Casts.begin(), Casts.end());
6699 }
6700 }
6701
6702 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6703
6704 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6705
6706 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6707 Instruction::BinaryOps BinOp) {
6708 // When unrolling and the VF is 1, we only need to add a simple scalar.
6709 Type *Ty = Val->getType();
6710 assert(!Ty->isVectorTy() && "Val must be a scalar");
6711
6712 if (Ty->isFloatingPointTy()) {
6713 Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6714
6715 // Floating point operations had to be 'fast' to enable the unrolling.
6716 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6717 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6718 }
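// Integer case: the per-part step is simply Val + StartIdx * Step.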
6719 Constant *C = ConstantInt::get(Ty, StartIdx);
6720 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6721 }
6722
6723 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6724 SmallVector<Metadata *, 4> MDs;
6725 // Reserve first location for self reference to the LoopID metadata node.
6726 MDs.push_back(nullptr);
6727 bool IsUnrollMetadata = false;
6728 MDNode *LoopID = L->getLoopID();
6729 if (LoopID) {
6730 // First find existing loop unrolling disable metadata.
6731 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6732 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6733 if (MD) {
6734 const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6735 IsUnrollMetadata =
6736 S && S->getString().startswith("llvm.loop.unroll.disable");
6737 }
6738 MDs.push_back(LoopID->getOperand(i));
6739 }
6740 }
6741
6742 if (!IsUnrollMetadata) {
6743 // Add runtime unroll disable metadata.
6744 LLVMContext &Context = L->getHeader()->getContext();
6745 SmallVector<Metadata *, 1> DisableOperands;
6746 DisableOperands.push_back(
6747 MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6748 MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6749 MDs.push_back(DisableNode);
6750 MDNode *NewLoopID = MDNode::get(Context, MDs);
6751 // Set operand 0 to refer to the loop id itself.
6752 NewLoopID->replaceOperandWith(0, NewLoopID);
6753 L->setLoopID(NewLoopID);
6754 }
6755 }
6756
6757 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6758 const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6759 assert(Range.End > Range.Start && "Trying to test an empty VF range.");
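// Evaluate the predicate once at Range.Start; then clamp Range.End at the
// first larger power-of-two VF for which the answer changes, so that all VFs
// remaining in the range share the same decision.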
6760 bool PredicateAtRangeStart = Predicate(Range.Start);
6761
6762 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6763 if (Predicate(TmpVF) != PredicateAtRangeStart) {
6764 Range.End = TmpVF;
6765 break;
6766 }
6767
6768 return PredicateAtRangeStart;
6769 }
6770
6771 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6772 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6773 /// of VF's starting at a given VF and extending it as much as possible. Each
6774 /// vectorization decision can potentially shorten this sub-range during
6775 /// buildVPlan().
6776 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6777 for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6778 VFRange SubRange = {VF, MaxVF + 1};
6779 VPlans.push_back(buildVPlan(SubRange));
6780 VF = SubRange.End;
6781 }
6782 }
6783
6784 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6785 VPlanPtr &Plan) {
6786 assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6787
6788 // Look for cached value.
6789 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6790 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6791 if (ECEntryIt != EdgeMaskCache.end())
6792 return ECEntryIt->second;
6793
6794 VPValue *SrcMask = createBlockInMask(Src, Plan);
6795
6796 // The terminator has to be a branch inst!
6797 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6798 assert(BI && "Unexpected terminator found");
6799
6800 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
6801 return EdgeMaskCache[Edge] = SrcMask;
6802
6803 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6804 assert(EdgeMask && "No Edge Mask found for condition");
6805
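// The branch condition selects successor 0; the edge into successor 1 is
// therefore guarded by the negated condition.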
6806 if (BI->getSuccessor(0) != Dst)
6807 EdgeMask = Builder.createNot(EdgeMask);
6808
6809 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6810 EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6811
6812 return EdgeMaskCache[Edge] = EdgeMask;
6813 }
6814
6815 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6816 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6817
6818 // Look for cached value.
6819 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6820 if (BCEntryIt != BlockMaskCache.end())
6821 return BCEntryIt->second;
6822
6823 // All-one mask is modelled as no-mask following the convention for masked
6824 // load/store/gather/scatter. Initialize BlockMask to no-mask.
6825 VPValue *BlockMask = nullptr;
6826
6827 if (OrigLoop->getHeader() == BB) {
6828 if (!CM.blockNeedsPredication(BB))
6829 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
6830
6831 // Introduce the early-exit compare IV <= BTC to form header block mask.
6832 // This is used instead of IV < TC because TC may wrap, unlike BTC.
6833 // Start by constructing the desired canonical IV.
6834 VPValue *IV = nullptr;
6835 if (Legal->getPrimaryInduction())
6836 IV = Plan->getVPValue(Legal->getPrimaryInduction());
6837 else {
6838 auto IVRecipe = new VPWidenCanonicalIVRecipe();
6839 Builder.getInsertBlock()->appendRecipe(IVRecipe);
6840 IV = IVRecipe->getVPValue();
6841 }
6842 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
6843 bool TailFolded = !CM.isScalarEpilogueAllowed();
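// When folding the tail, prefer the target's active-lane-mask intrinsic if it
// is available; otherwise emit an explicit IV <= BTC compare.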
6844 if (TailFolded && CM.TTI.emitGetActiveLaneMask())
6845 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, BTC});
6846 else
6847 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
6848 return BlockMaskCache[BB] = BlockMask;
6849 }
6850
6851 // This is the block mask. We OR all incoming edges.
6852 for (auto *Predecessor : predecessors(BB)) {
6853 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6854 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6855 return BlockMaskCache[BB] = EdgeMask;
6856
6857 if (!BlockMask) { // BlockMask has its initialized nullptr value.
6858 BlockMask = EdgeMask;
6859 continue;
6860 }
6861
6862 BlockMask = Builder.createOr(BlockMask, EdgeMask);
6863 }
6864
6865 return BlockMaskCache[BB] = BlockMask;
6866 }
6867
6868 VPWidenMemoryInstructionRecipe *
6869 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6870 VPlanPtr &Plan) {
6871 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6872 "Must be called with either a load or store");
6873
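// Widen this access only if, for the VFs in the (possibly clamped) range, the
// cost model decided to keep it as a wide or interleaved memory operation
// rather than scalarizing it.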
6874 auto willWiden = [&](unsigned VF) -> bool {
6875 if (VF == 1)
6876 return false;
6877 LoopVectorizationCostModel::InstWidening Decision =
6878 CM.getWideningDecision(I, VF);
6879 assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6880 "CM decision should be taken at this point.");
6881 if (Decision == LoopVectorizationCostModel::CM_Interleave)
6882 return true;
6883 if (CM.isScalarAfterVectorization(I, VF) ||
6884 CM.isProfitableToScalarize(I, VF))
6885 return false;
6886 return Decision != LoopVectorizationCostModel::CM_Scalarize;
6887 };
6888
6889 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6890 return nullptr;
6891
6892 VPValue *Mask = nullptr;
6893 if (Legal->isMaskRequired(I))
6894 Mask = createBlockInMask(I->getParent(), Plan);
6895
6896 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
6897 if (LoadInst *Load = dyn_cast<LoadInst>(I))
6898 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
6899
6900 StoreInst *Store = cast<StoreInst>(I);
6901 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
6902 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
6903 }
6904
6905 VPWidenIntOrFpInductionRecipe *
6906 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const {
6907 // Check if this is an integer or fp induction. If so, build the recipe that
6908 // produces its scalar and vector values.
6909 InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
6910 if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6911 II.getKind() == InductionDescriptor::IK_FpInduction)
6912 return new VPWidenIntOrFpInductionRecipe(Phi);
6913
6914 return nullptr;
6915 }
6916
6917 VPWidenIntOrFpInductionRecipe *
6918 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I,
6919 VFRange &Range) const {
6920 // Optimize the special case where the source is a constant integer
6921 // induction variable. Notice that we can only optimize the 'trunc' case
6922 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6923 // (c) other casts depend on pointer size.
6924
6925 // Determine whether \p K is a truncation based on an induction variable that
6926 // can be optimized.
6927 auto isOptimizableIVTruncate =
6928 [&](Instruction *K) -> std::function<bool(unsigned)> {
6929 return
6930 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6931 };
6932
6933 if (LoopVectorizationPlanner::getDecisionAndClampRange(
6934 isOptimizableIVTruncate(I), Range))
6935 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6936 I);
6937 return nullptr;
6938 }
6939
6940 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
6941 // We know that all PHIs in non-header blocks are converted into selects, so
6942 // we don't have to worry about the insertion order and we can just use the
6943 // builder. At this point we generate the predication tree. There may be
6944 // duplications since this is a simple recursive scan, but future
6945 // optimizations will clean it up.
6946
6947 SmallVector<VPValue *, 2> Operands;
6948 unsigned NumIncoming = Phi->getNumIncomingValues();
6949 for (unsigned In = 0; In < NumIncoming; In++) {
6950 VPValue *EdgeMask =
6951 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6952 assert((EdgeMask || NumIncoming == 1) &&
6953 "Multiple predecessors with one having a full mask");
6954 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
6955 if (EdgeMask)
6956 Operands.push_back(EdgeMask);
6957 }
6958 return new VPBlendRecipe(Phi, Operands);
6959 }
6960
6961 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
6962 VPlan &Plan) const {
6963
6964 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6965 [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); },
6966 Range);
6967
6968 if (IsPredicated)
6969 return nullptr;
6970
6971 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6972 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6973 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
6974 return nullptr;
6975
6976 auto willWiden = [&](unsigned VF) -> bool {
6977 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6978 // The following case may be scalarized depending on the VF.
6979 // The flag shows whether we use an intrinsic or a plain call for the
6980 // vectorized version of the instruction.
6981 // Is it more beneficial to emit the intrinsic call than a library call?
6982 bool NeedToScalarize = false;
6983 unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
6984 bool UseVectorIntrinsic =
6985 ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
6986 return UseVectorIntrinsic || !NeedToScalarize;
6987 };
6988
6989 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6990 return nullptr;
6991
6992 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
6993 }
6994
6995 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
6996 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
6997 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
6998 // The instruction should be widened, unless it is scalar after vectorization,
6999 // scalarization is profitable, or it is predicated.
7000 auto WillScalarize = [this, I](unsigned VF) -> bool {
7001 return CM.isScalarAfterVectorization(I, VF) ||
7002 CM.isProfitableToScalarize(I, VF) ||
7003 CM.isScalarWithPredication(I, VF);
7004 };
7005 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7006 Range);
7007 }
7008
7009 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
7010 auto IsVectorizableOpcode = [](unsigned Opcode) {
7011 switch (Opcode) {
7012 case Instruction::Add:
7013 case Instruction::And:
7014 case Instruction::AShr:
7015 case Instruction::BitCast:
7016 case Instruction::FAdd:
7017 case Instruction::FCmp:
7018 case Instruction::FDiv:
7019 case Instruction::FMul:
7020 case Instruction::FNeg:
7021 case Instruction::FPExt:
7022 case Instruction::FPToSI:
7023 case Instruction::FPToUI:
7024 case Instruction::FPTrunc:
7025 case Instruction::FRem:
7026 case Instruction::FSub:
7027 case Instruction::ICmp:
7028 case Instruction::IntToPtr:
7029 case Instruction::LShr:
7030 case Instruction::Mul:
7031 case Instruction::Or:
7032 case Instruction::PtrToInt:
7033 case Instruction::SDiv:
7034 case Instruction::Select:
7035 case Instruction::SExt:
7036 case Instruction::Shl:
7037 case Instruction::SIToFP:
7038 case Instruction::SRem:
7039 case Instruction::Sub:
7040 case Instruction::Trunc:
7041 case Instruction::UDiv:
7042 case Instruction::UIToFP:
7043 case Instruction::URem:
7044 case Instruction::Xor:
7045 case Instruction::ZExt:
7046 return true;
7047 }
7048 return false;
7049 };
7050
7051 if (!IsVectorizableOpcode(I->getOpcode()))
7052 return nullptr;
7053
7054 // Success: widen this instruction.
7055 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
7056 }
7057
7058 VPBasicBlock *VPRecipeBuilder::handleReplication(
7059 Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
7060 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
7061 VPlanPtr &Plan) {
7062 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7063 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
7064 Range);
7065
7066 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7067 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
7068
7069 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
7070 IsUniform, IsPredicated);
7071 setRecipe(I, Recipe);
7072
7073 // Find if I uses a predicated instruction. If so, it will use its scalar
7074 // value. Avoid hoisting the insert-element which packs the scalar value into
7075 // a vector value, as that happens iff all users use the vector value.
7076 for (auto &Op : I->operands())
7077 if (auto *PredInst = dyn_cast<Instruction>(Op))
7078 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
7079 PredInst2Recipe[PredInst]->setAlsoPack(false);
7080
7081 // Finalize the recipe for Instr, first if it is not predicated.
7082 if (!IsPredicated) {
7083 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7084 VPBB->appendRecipe(Recipe);
7085 return VPBB;
7086 }
7087 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7088 assert(VPBB->getSuccessors().empty() &&
7089 "VPBB has successors when handling predicated replication.");
7090 // Record predicated instructions for above packing optimizations.
7091 PredInst2Recipe[I] = Recipe;
7092 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7093 VPBlockUtils::insertBlockAfter(Region, VPBB);
7094 auto *RegSucc = new VPBasicBlock();
7095 VPBlockUtils::insertBlockAfter(RegSucc, Region);
7096 return RegSucc;
7097 }
7098
7099 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7100 VPRecipeBase *PredRecipe,
7101 VPlanPtr &Plan) {
7102 // Instructions marked for predication are replicated and placed under an
7103 // if-then construct to prevent side-effects.
7104
7105 // Generate recipes to compute the block mask for this region.
7106 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7107
7108 // Build the triangular if-then region.
7109 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7110 assert(Instr->getParent() && "Predicated instruction not in any basic block");
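// The region forms a triangle: Entry (branch-on-mask) -> Pred (the replicated
// instruction) -> Exit (optional phi), with a direct Entry -> Exit edge that
// bypasses Pred for masked-off lanes.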
7111 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7112 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7113 auto *PHIRecipe =
7114 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7115 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7116 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7117 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7118
7119 // Note: first set Entry as region entry and then connect successors starting
7120 // from it in order, to propagate the "parent" of each VPBasicBlock.
7121 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7122 VPBlockUtils::connectBlocks(Pred, Exit);
7123
7124 return Region;
7125 }
7126
7127 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
7128 VFRange &Range,
7129 VPlanPtr &Plan) {
7130 // First, check for specific widening recipes that deal with calls, memory
7131 // operations, inductions and Phi nodes.
7132 if (auto *CI = dyn_cast<CallInst>(Instr))
7133 return tryToWidenCall(CI, Range, *Plan);
7134
7135 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
7136 return tryToWidenMemory(Instr, Range, Plan);
7137
7138 VPRecipeBase *Recipe;
7139 if (auto Phi = dyn_cast<PHINode>(Instr)) {
7140 if (Phi->getParent() != OrigLoop->getHeader())
7141 return tryToBlend(Phi, Plan);
7142 if ((Recipe = tryToOptimizeInductionPHI(Phi)))
7143 return Recipe;
7144 return new VPWidenPHIRecipe(Phi);
7145 }
7146
7147 if (isa<TruncInst>(Instr) &&
7148 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
7149 return Recipe;
7150
7151 if (!shouldWiden(Instr, Range))
7152 return nullptr;
7153
7154 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
7155 return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
7156 OrigLoop);
7157
7158 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
7159 bool InvariantCond =
7160 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
7161 return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
7162 InvariantCond);
7163 }
7164
7165 return tryToWiden(Instr, *Plan);
7166 }
7167
7168 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7169 unsigned MaxVF) {
7170 assert(OrigLoop->empty() && "Inner loop expected.");
7171
7172 // Collect conditions feeding internal conditional branches; they need to be
7173 // represented in VPlan for it to model masking.
7174 SmallPtrSet<Value *, 1> NeedDef;
7175
7176 auto *Latch = OrigLoop->getLoopLatch();
7177 for (BasicBlock *BB : OrigLoop->blocks()) {
7178 if (BB == Latch)
7179 continue;
7180 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7181 if (Branch && Branch->isConditional())
7182 NeedDef.insert(Branch->getCondition());
7183 }
7184
7185 // If the tail is to be folded by masking, the primary induction variable, if
7186 // it exists, needs to be represented in VPlan for it to model early-exit masking.
7187 // Also, both the Phi and the live-out instruction of each reduction are
7188 // required in order to introduce a select between them in VPlan.
7189 if (CM.foldTailByMasking()) {
7190 if (Legal->getPrimaryInduction())
7191 NeedDef.insert(Legal->getPrimaryInduction());
7192 for (auto &Reduction : Legal->getReductionVars()) {
7193 NeedDef.insert(Reduction.first);
7194 NeedDef.insert(Reduction.second.getLoopExitInstr());
7195 }
7196 }
7197
7198 // Collect instructions from the original loop that will become trivially dead
7199 // in the vectorized loop. We don't need to vectorize these instructions. For
7200 // example, original induction update instructions can become dead because we
7201 // separately emit induction "steps" when generating code for the new loop.
7202 // Similarly, we create a new latch condition when setting up the structure
7203 // of the new loop, so the old one can become dead.
7204 SmallPtrSet<Instruction *, 4> DeadInstructions;
7205 collectTriviallyDeadInstructions(DeadInstructions);
7206
7207 // Add assume instructions we need to drop to DeadInstructions, to prevent
7208 // them from being added to the VPlan.
7209 // TODO: We only need to drop assumes in blocks that get flattened. If the
7210 // control flow is preserved, we should keep them.
7211 auto &ConditionalAssumes = Legal->getConditionalAssumes();
7212 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7213
7214 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7215 // Dead instructions do not need sinking. Remove them from SinkAfter.
7216 for (Instruction *I : DeadInstructions)
7217 SinkAfter.erase(I);
7218
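// Build one VPlan per sub-range of VFs that share the same widening decisions;
// buildVPlanWithVPRecipes may shrink SubRange.End, and the next iteration
// resumes from there.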
7219 for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7220 VFRange SubRange = {VF, MaxVF + 1};
7221 VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7222 DeadInstructions, SinkAfter));
7223 VF = SubRange.End;
7224 }
7225 }
7226
7227 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7228 VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7229 SmallPtrSetImpl<Instruction *> &DeadInstructions,
7230 const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7231
7232 // Hold a mapping from predicated instructions to their recipes, in order to
7233 // fix their AlsoPack behavior if a user is determined to replicate and use a
7234 // scalar instead of vector value.
7235 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7236
7237 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7238
7239 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7240
7241 // ---------------------------------------------------------------------------
7242 // Pre-construction: record ingredients whose recipes we'll need to further
7243 // process after constructing the initial VPlan.
7244 // ---------------------------------------------------------------------------
7245
7246 // Mark instructions we'll need to sink later and their targets as
7247 // ingredients whose recipe we'll need to record.
7248 for (auto &Entry : SinkAfter) {
7249 RecipeBuilder.recordRecipeOf(Entry.first);
7250 RecipeBuilder.recordRecipeOf(Entry.second);
7251 }
7252
7253 // For each interleave group which is relevant for this (possibly trimmed)
7254 // Range, add it to the set of groups to be later applied to the VPlan and add
7255 // placeholders for its members' Recipes which we'll be replacing with a
7256 // single VPInterleaveRecipe.
7257 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7258 auto applyIG = [IG, this](unsigned VF) -> bool {
7259 return (VF >= 2 && // Query is illegal for VF == 1
7260 CM.getWideningDecision(IG->getInsertPos(), VF) ==
7261 LoopVectorizationCostModel::CM_Interleave);
7262 };
7263 if (!getDecisionAndClampRange(applyIG, Range))
7264 continue;
7265 InterleaveGroups.insert(IG);
7266 for (unsigned i = 0; i < IG->getFactor(); i++)
7267 if (Instruction *Member = IG->getMember(i))
7268 RecipeBuilder.recordRecipeOf(Member);
7269 }
7270
7271 // ---------------------------------------------------------------------------
7272 // Build initial VPlan: Scan the body of the loop in a topological order to
7273 // visit each basic block after having visited its predecessor basic blocks.
7274 // ---------------------------------------------------------------------------
7275
7276 // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7277 auto Plan = std::make_unique<VPlan>();
7278 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7279 Plan->setEntry(VPBB);
7280
7281 // Represent values that will have defs inside VPlan.
7282 for (Value *V : NeedDef)
7283 Plan->addVPValue(V);
7284
7285 // Scan the body of the loop in a topological order to visit each basic block
7286 // after having visited its predecessor basic blocks.
7287 LoopBlocksDFS DFS(OrigLoop);
7288 DFS.perform(LI);
7289
7290 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7291 // Relevant instructions from basic block BB will be grouped into VPRecipe
7292 // ingredients and fill a new VPBasicBlock.
7293 unsigned VPBBsForBB = 0;
7294 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7295 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7296 VPBB = FirstVPBBForBB;
7297 Builder.setInsertPoint(VPBB);
7298
7299 // Introduce each ingredient into VPlan.
7300 // TODO: Model and preserve debug intrinsics in VPlan.
7301 for (Instruction &I : BB->instructionsWithoutDebug()) {
7302 Instruction *Instr = &I;
7303
7304 // First filter out irrelevant instructions, to ensure no recipes are
7305 // built for them.
7306 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
7307 continue;
7308
7309 if (auto Recipe =
7310 RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
7311 RecipeBuilder.setRecipe(Instr, Recipe);
7312 VPBB->appendRecipe(Recipe);
7313 continue;
7314 }
7315
7316 // Otherwise, if all widening options failed, Instruction is to be
7317 // replicated. This may create a successor for VPBB.
7318 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7319 Instr, Range, VPBB, PredInst2Recipe, Plan);
7320 if (NextVPBB != VPBB) {
7321 VPBB = NextVPBB;
7322 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7323 : "");
7324 }
7325 }
7326 }
7327
7328 // Discard the empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
7329 // may also be empty, such as the last one (VPBB), reflecting original
7330 // basic blocks with no recipes.
7331 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7332 assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7333 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7334 VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7335 delete PreEntry;
7336
7337 // ---------------------------------------------------------------------------
7338 // Transform initial VPlan: Apply previously taken decisions, in order, to
7339 // bring the VPlan to its final state.
7340 // ---------------------------------------------------------------------------
7341
7342 // Apply Sink-After legal constraints.
7343 for (auto &Entry : SinkAfter) {
7344 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
7345 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
7346 Sink->moveAfter(Target);
7347 }
7348
7349 // Interleave memory: for each Interleave Group we marked earlier as relevant
7350 // for this VPlan, replace the Recipes widening its memory instructions with a
7351 // single VPInterleaveRecipe at its insertion point.
7352 for (auto IG : InterleaveGroups) {
7353 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
7354 RecipeBuilder.getRecipe(IG->getInsertPos()));
7355 (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
7356 ->insertBefore(Recipe);
7357
7358 for (unsigned i = 0; i < IG->getFactor(); ++i)
7359 if (Instruction *Member = IG->getMember(i)) {
7360 RecipeBuilder.getRecipe(Member)->eraseFromParent();
7361 }
7362 }
7363
7364 // Finally, if tail is folded by masking, introduce selects between the phi
7365 // and the live-out instruction of each reduction, at the end of the latch.
7366 if (CM.foldTailByMasking()) {
7367 Builder.setInsertPoint(VPBB);
7368 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
7369 for (auto &Reduction : Legal->getReductionVars()) {
7370 VPValue *Phi = Plan->getVPValue(Reduction.first);
7371 VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
7372 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
7373 }
7374 }
7375
7376 std::string PlanName;
7377 raw_string_ostream RSO(PlanName);
7378 unsigned VF = Range.Start;
7379 Plan->addVF(VF);
7380 RSO << "Initial VPlan for VF={" << VF;
7381 for (VF *= 2; VF < Range.End; VF *= 2) {
7382 Plan->addVF(VF);
7383 RSO << "," << VF;
7384 }
7385 RSO << "},UF>=1";
7386 RSO.flush();
7387 Plan->setName(PlanName);
7388
7389 return Plan;
7390 }
7391
7392 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
7393 // Outer loop handling: They may require CFG and instruction level
7394 // transformations before even evaluating whether vectorization is profitable.
7395 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7396 // the vectorization pipeline.
7397 assert(!OrigLoop->empty());
7398 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7399
7400 // Create new empty VPlan
7401 auto Plan = std::make_unique<VPlan>();
7402
7403 // Build hierarchical CFG
7404 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7405 HCFGBuilder.buildHierarchicalCFG();
7406
7407 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
7408 Plan->addVF(VF);
7409
7410 if (EnableVPlanPredication) {
7411 VPlanPredicator VPP(*Plan);
7412 VPP.predicate();
7413
7414 // Avoid running transformation to recipes until masked code generation in
7415 // VPlan-native path is in place.
7416 return Plan;
7417 }
7418
7419 SmallPtrSet<Instruction *, 1> DeadInstructions;
7420 VPlanTransforms::VPInstructionsToVPRecipes(
7421 OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
7422 return Plan;
7423 }
7424
7425 Value *LoopVectorizationPlanner::VPCallbackILV::
7426 getOrCreateVectorValues(Value *V, unsigned Part) {
7427 return ILV.getOrCreateVectorValue(V, Part);
7428 }
7429
7430 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
7431 Value *V, const VPIteration &Instance) {
7432 return ILV.getOrCreateScalarValue(V, Instance);
7433 }
7434
7435 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
7436 VPSlotTracker &SlotTracker) const {
7437 O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
7438 IG->getInsertPos()->printAsOperand(O, false);
7439 O << ", ";
7440 getAddr()->printAsOperand(O, SlotTracker);
7441 VPValue *Mask = getMask();
7442 if (Mask) {
7443 O << ", ";
7444 Mask->printAsOperand(O, SlotTracker);
7445 }
7446 for (unsigned i = 0; i < IG->getFactor(); ++i)
7447 if (Instruction *I = IG->getMember(i))
7448 O << "\\l\" +\n" << Indent << "\" " << VPlanIngredient(I) << " " << i;
7449 }
7450
7451 void VPWidenCallRecipe::execute(VPTransformState &State) {
7452 State.ILV->widenCallInstruction(Ingredient, User, State);
7453 }
7454
7455 void VPWidenSelectRecipe::execute(VPTransformState &State) {
7456 State.ILV->widenSelectInstruction(Ingredient, User, InvariantCond, State);
7457 }
7458
7459 void VPWidenRecipe::execute(VPTransformState &State) {
7460 State.ILV->widenInstruction(Ingredient, User, State);
7461 }
7462
7463 void VPWidenGEPRecipe::execute(VPTransformState &State) {
7464 State.ILV->widenGEP(GEP, User, State.UF, State.VF, IsPtrLoopInvariant,
7465 IsIndexLoopInvariant, State);
7466 }
7467
7468 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
7469 assert(!State.Instance && "Int or FP induction being replicated.");
7470 State.ILV->widenIntOrFpInduction(IV, Trunc);
7471 }
7472
7473 void VPWidenPHIRecipe::execute(VPTransformState &State) {
7474 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7475 }
7476
7477 void VPBlendRecipe::execute(VPTransformState &State) {
7478 State.ILV->setDebugLocFromInst(State.Builder, Phi);
7479 // We know that all PHIs in non-header blocks are converted into
7480 // selects, so we don't have to worry about the insertion order and we
7481 // can just use the builder.
7482 // At this point we generate the predication tree. There may be
7483 // duplications since this is a simple recursive scan, but future
7484 // optimizations will clean it up.
7485
7486 unsigned NumIncoming = getNumIncomingValues();
7487
7488 // Generate a sequence of selects of the form:
7489 // SELECT(Mask3, In3,
7490 // SELECT(Mask2, In2,
7491 // SELECT(Mask1, In1,
7492 // In0)))
7493 // Note that Mask0 is never used: lanes for which no path reaches this phi
7494 // (and so are essentially undef) are taken from In0.
7495 InnerLoopVectorizer::VectorParts Entry(State.UF);
7496 for (unsigned In = 0; In < NumIncoming; ++In) {
7497 for (unsigned Part = 0; Part < State.UF; ++Part) {
7498 // We might have single edge PHIs (blocks) - use an identity
7499 // 'select' for the first PHI operand.
7500 Value *In0 = State.get(getIncomingValue(In), Part);
7501 if (In == 0)
7502 Entry[Part] = In0; // Initialize with the first incoming value.
7503 else {
7504 // Select between the current value and the previous incoming edge
7505 // based on the incoming mask.
7506 Value *Cond = State.get(getMask(In), Part);
7507 Entry[Part] =
7508 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7509 }
7510 }
7511 }
7512 for (unsigned Part = 0; Part < State.UF; ++Part)
7513 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7514 }
7515
7516 void VPInterleaveRecipe::execute(VPTransformState &State) {
7517 assert(!State.Instance && "Interleave group being replicated.");
7518 State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
7519 }
7520
7521 void VPReplicateRecipe::execute(VPTransformState &State) {
7522 if (State.Instance) { // Generate a single instance.
7523 State.ILV->scalarizeInstruction(Ingredient, User, *State.Instance,
7524 IsPredicated, State);
7525 // Insert scalar instance packing it into a vector.
7526 if (AlsoPack && State.VF > 1) {
7527 // If we're constructing lane 0, initialize to start from undef.
7528 if (State.Instance->Lane == 0) {
7529 Value *Undef = UndefValue::get(
7530 FixedVectorType::get(Ingredient->getType(), State.VF));
7531 State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7532 }
7533 State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7534 }
7535 return;
7536 }
7537
7538 // Generate scalar instances for all VF lanes of all UF parts, unless the
7539 // instruction is uniform, in which case generate only the first lane for each
7540 // of the UF parts.
7541 unsigned EndLane = IsUniform ? 1 : State.VF;
7542 for (unsigned Part = 0; Part < State.UF; ++Part)
7543 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7544 State.ILV->scalarizeInstruction(Ingredient, User, {Part, Lane},
7545 IsPredicated, State);
7546 }
7547
7548 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7549 assert(State.Instance && "Branch on Mask works only on single instance.");
7550
7551 unsigned Part = State.Instance->Part;
7552 unsigned Lane = State.Instance->Lane;
7553
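// Extract this lane's bit from the (possibly vector) block-in mask; a null
// mask means the block executes unconditionally for this instance.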
7554 Value *ConditionBit = nullptr;
7555 VPValue *BlockInMask = getMask();
7556 if (BlockInMask) {
7557 ConditionBit = State.get(BlockInMask, Part);
7558 if (ConditionBit->getType()->isVectorTy())
7559 ConditionBit = State.Builder.CreateExtractElement(
7560 ConditionBit, State.Builder.getInt32(Lane));
7561 } else // Block in mask is all-one.
7562 ConditionBit = State.Builder.getTrue();
7563
7564 // Replace the temporary unreachable terminator with a new conditional branch,
7565 // whose two destinations will be set later when they are created.
7566 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7567 assert(isa<UnreachableInst>(CurrentTerminator) &&
7568 "Expected to replace unreachable terminator with conditional branch.");
7569 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7570 CondBr->setSuccessor(0, nullptr);
7571 ReplaceInstWithInst(CurrentTerminator, CondBr);
7572 }
7573
7574 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7575 assert(State.Instance && "Predicated instruction PHI works per instance.");
7576 Instruction *ScalarPredInst = cast<Instruction>(
7577 State.ValueMap.getScalarValue(PredInst, *State.Instance));
7578 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7579 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7580 assert(PredicatingBB && "Predicated block has no single predecessor.");
7581
7582 // By current pack/unpack logic we need to generate only a single phi node: if
7583 // a vector value for the predicated instruction exists at this point it means
7584 // the instruction has vector users only, and a phi for the vector value is
7585 // needed. In this case the recipe of the predicated instruction is marked to
7586 // also do that packing, thereby "hoisting" the insert-element sequence.
7587 // Otherwise, a phi node for the scalar value is needed.
7588 unsigned Part = State.Instance->Part;
7589 if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7590 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7591 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7592 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7593 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7594 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7595 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7596 } else {
7597 Type *PredInstType = PredInst->getType();
7598 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7599 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7600 Phi->addIncoming(ScalarPredInst, PredicatedBB);
7601 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7602 }
7603 }
7604
7605 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7606 VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
7607 State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue,
7608 getMask());
7609 }
7610
7611 // Determine how to lower the scalar epilogue, which depends on 1) optimizing
7612 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
7613 // predication, and 4) a TTI hook that analyzes whether the loop is suitable
7614 // for predication.
7615 static ScalarEpilogueLowering getScalarEpilogueLowering(
7616 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
7617 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
7618 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
7619 LoopVectorizationLegality &LVL) {
7620 bool OptSize =
7621 F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
7622 PGSOQueryType::IRPass);
7623 // 1) OptSize takes precedence over all other options, i.e. if this is set,
7624 // don't look at hints or options, and don't request a scalar epilogue.
7625 if (OptSize)
7626 return CM_ScalarEpilogueNotAllowedOptSize;
7627
7628 bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
7629 !PreferPredicateOverEpilog;
7630
7631 // 2) Next, if disabling predication is requested on the command line, honour
7632 // this and request a scalar epilogue.
7633 if (PredicateOptDisabled)
7634 return CM_ScalarEpilogueAllowed;
7635
7636 // 3) and 4) Check whether predication is requested on the command line or
7637 // with a loop hint, or whether the TTI hook indicates it is profitable; if
7638 // so, request predication.
7639 if (PreferPredicateOverEpilog ||
7640 Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
7641 (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
7642 LVL.getLAI()) &&
7643 Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
7644 return CM_ScalarEpilogueNotNeededUsePredicate;
7645
7646 return CM_ScalarEpilogueAllowed;
7647 }
7648
7649 // Process the loop in the VPlan-native vectorization path. This path builds
7650 // VPlan upfront in the vectorization pipeline, which allows to apply
7651 // VPlan-to-VPlan transformations from the very beginning without modifying the
7652 // input LLVM IR.
7653 static bool processLoopInVPlanNativePath(
7654 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7655 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7656 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7657 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
7658 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
7659
7660 if (PSE.getBackedgeTakenCount() == PSE.getSE()->getCouldNotCompute()) {
7661 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
7662 return false;
7663 }
7664 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7665 Function *F = L->getHeader()->getParent();
7666 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7667
7668 ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7669 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
7670
7671 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7672 &Hints, IAI);
7673 // Use the planner for outer loop vectorization.
7674 // TODO: CM is not used at this point inside the planner. Turn CM into an
7675 // optional argument if we don't need it in the future.
7676 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
7677
7678 // Get user vectorization factor.
7679 const unsigned UserVF = Hints.getWidth();
7680
7681 // Plan how to best vectorize, return the best VF and its cost.
7682 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
7683
7684 // If we are stress testing VPlan builds, do not attempt to generate vector
7685 // code. Masked vector code generation support will follow soon.
7686 // Also, do not attempt to vectorize if no vector code will be produced.
7687 if (VPlanBuildStressTest || EnableVPlanPredication ||
7688 VectorizationFactor::Disabled() == VF)
7689 return false;
7690
7691 LVP.setBestPlan(VF.Width, 1);
7692
7693 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
7694 &CM);
7695 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
7696 << L->getHeader()->getParent()->getName() << "\"\n");
7697 LVP.executePlan(LB, DT);
7698
7699 // Mark the loop as already vectorized to avoid vectorizing again.
7700 Hints.setAlreadyVectorized();
7701
7702 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
7703 return true;
7704 }
7705
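// Global flags that disable interleaving or vectorization altogether are
// folded into the corresponding "only when forced" options here.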
7706 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
7707 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
7708 !EnableLoopInterleaving),
7709 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
7710 !EnableLoopVectorization) {}
7711
7712 bool LoopVectorizePass::processLoop(Loop *L) {
7713 assert((EnableVPlanNativePath || L->empty()) &&
7714 "VPlan-native path is not enabled. Only process inner loops.");
7715
7716 #ifndef NDEBUG
7717 const std::string DebugLocStr = getDebugLocString(L);
7718 #endif /* NDEBUG */
7719
7720 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
7721 << L->getHeader()->getParent()->getName() << "\" from "
7722 << DebugLocStr << "\n");
7723
7724 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
7725
7726 LLVM_DEBUG(
7727 dbgs() << "LV: Loop hints:"
7728 << " force="
7729 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7730 ? "disabled"
7731 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7732 ? "enabled"
7733 : "?"))
7734 << " width=" << Hints.getWidth()
7735 << " unroll=" << Hints.getInterleave() << "\n");
7736
7737 // Function containing loop
7738 Function *F = L->getHeader()->getParent();
7739
7740 // Looking at the diagnostic output is the only way to determine if a loop
7741 // was vectorized (other than looking at the IR or machine code), so it
7742 // is important to generate an optimization remark for each loop. Most of
7743 // these messages are generated as OptimizationRemarkAnalysis. Remarks
7744 // generated as OptimizationRemark and OptimizationRemarkMissed are
7745 // less verbose reporting vectorized loops and unvectorized loops that may
7746 // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
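  // (This path is only reached when the VPlan-native path is enabled, e.g. via
  // -enable-vplan-native-path, and typically also requires the outer loop to
  // carry an explicit vectorization hint such as
  // #pragma clang loop vectorize(enable).)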
  if (!L->empty())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints);

  assert(L->empty() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
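  // For example, a loop whose constant or profile-derived trip count says it
  // runs only a handful of iterations is pushed down the optimize-for-size
  // path below, so that runtime checks and a scalar epilogue do not dominate
  // its cost.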
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is an integer
  // loop and the vector instructions selected are purely integer vector
  // instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
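  // (A candidate interleave group is, roughly, a set of strided accesses to
  // the same underlying object with the same stride, e.g. A[2*i] and A[2*i+1];
  // such a group can later be widened into a single wide memory access plus
  // shuffles.)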
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);

  // Get user vectorization factor and interleave count.
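  // These come from loop metadata; for instance, clang attaches the
  // corresponding llvm.loop.vectorize.width / llvm.loop.interleave.count
  // metadata for #pragma clang loop vectorize_width(4) interleave_count(2).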
  unsigned UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided not to vectorize the loop, then interleave it instead.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
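    // (Concretely, DisableRuntimeUnroll causes llvm.loop.unroll.runtime.disable
    // metadata to be attached to the remainder loop below, via
    // AddRuntimeUnrollDisableMetaData.)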
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

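  // Transfer any user-specified follow-up transformations to the remainder
  // (scalar epilogue) loop; e.g. llvm.loop.vectorize.followup_epilogue can
  // carry attributes that should apply only to the epilogue (see
  // llvm/docs/TransformMetadata.rst).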
  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
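  // (For example, interleaving a reduction by a factor of 2 splits one
  // loop-carried dependence chain into two independent chains, which can help
  // superscalar cores even at a vectorization factor of 1.)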
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
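  // (Loop-simplify form roughly means each loop has a dedicated preheader, a
  // single backedge/latch, and dedicated exit blocks; see simplifyLoop for the
  // precise definition.)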
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}