//===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains the declarations of the Vectorization Plan base classes:
/// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
/// VPBlockBase, together implementing a Hierarchical CFG;
/// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
/// treated as proper graphs for generic algorithms;
/// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
/// within VPBasicBlocks;
/// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
/// instruction;
/// 5. The VPlan class holding a candidate for vectorization;
/// 6. The VPlanPrinter class providing a way to print a plan in dot format;
/// These are documented in docs/VectorizationPlan.rst.
//
//===----------------------------------------------------------------------===//

25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27
28 #include "VPlanLoopInfo.h"
29 #include "VPlanValue.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/DepthFirstIterator.h"
32 #include "llvm/ADT/GraphTraits.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/ilist.h"
40 #include "llvm/ADT/ilist_node.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/Support/InstructionCost.h"
44 #include <algorithm>
45 #include <cassert>
46 #include <cstddef>
47 #include <map>
48 #include <string>
49
50 namespace llvm {
51
52 class BasicBlock;
53 class DominatorTree;
54 class InnerLoopVectorizer;
55 class LoopInfo;
56 class raw_ostream;
57 class RecurrenceDescriptor;
58 class Value;
59 class VPBasicBlock;
60 class VPRegionBlock;
61 class VPlan;
62 class VPlanSlp;
63
64 /// Returns a calculation for the total number of elements for a given \p VF.
65 /// For fixed width vectors this value is a constant, whereas for scalable
66 /// vectors it is an expression determined at runtime.
67 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF);
68
69 /// A range of powers-of-2 vectorization factors with fixed start and
70 /// adjustable end. The range includes start and excludes end, e.g.,:
71 /// [1, 9) = {1, 2, 4, 8}
72 struct VFRange {
73 // A power of 2.
74 const ElementCount Start;
75
76 // Need not be a power of 2. If End <= Start range is empty.
77 ElementCount End;
78
isEmptyVFRange79 bool isEmpty() const {
80 return End.getKnownMinValue() <= Start.getKnownMinValue();
81 }
82
VFRangeVFRange83 VFRange(const ElementCount &Start, const ElementCount &End)
84 : Start(Start), End(End) {
85 assert(Start.isScalable() == End.isScalable() &&
86 "Both Start and End should have the same scalable flag");
87 assert(isPowerOf2_32(Start.getKnownMinValue()) &&
88 "Expected Start to be a power of 2");
89 }
90 };
91
92 using VPlanPtr = std::unique_ptr<VPlan>;
93
94 /// In what follows, the term "input IR" refers to code that is fed into the
95 /// vectorizer whereas the term "output IR" refers to code that is generated by
96 /// the vectorizer.
97
98 /// VPLane provides a way to access lanes in both fixed width and scalable
99 /// vectors, where for the latter the lane index sometimes needs calculating
100 /// as a runtime expression.
101 class VPLane {
102 public:
103 /// Kind describes how to interpret Lane.
104 enum class Kind : uint8_t {
105 /// For First, Lane is the index into the first N elements of a
106 /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
107 First,
108 /// For ScalableLast, Lane is the offset from the start of the last
109 /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
110 /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
111 /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
112 ScalableLast
113 };
114
115 private:
116 /// in [0..VF)
117 unsigned Lane;
118
119 /// Indicates how the Lane should be interpreted, as described above.
120 Kind LaneKind;
121
122 public:
VPLane(unsigned Lane,Kind LaneKind)123 VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
124
getFirstLane()125 static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
126
getLastLaneForVF(const ElementCount & VF)127 static VPLane getLastLaneForVF(const ElementCount &VF) {
128 unsigned LaneOffset = VF.getKnownMinValue() - 1;
129 Kind LaneKind;
130 if (VF.isScalable())
131 // In this case 'LaneOffset' refers to the offset from the start of the
132 // last subvector with VF.getKnownMinValue() elements.
133 LaneKind = VPLane::Kind::ScalableLast;
134 else
135 LaneKind = VPLane::Kind::First;
136 return VPLane(LaneOffset, LaneKind);
137 }
138
139 /// Returns a compile-time known value for the lane index and asserts if the
140 /// lane can only be calculated at runtime.
getKnownLane()141 unsigned getKnownLane() const {
142 assert(LaneKind == Kind::First);
143 return Lane;
144 }
145
146 /// Returns an expression describing the lane index that can be used at
147 /// runtime.
148 Value *getAsRuntimeExpr(IRBuilder<> &Builder, const ElementCount &VF) const;
149
150 /// Returns the Kind of lane offset.
getKind()151 Kind getKind() const { return LaneKind; }
152
153 /// Returns true if this is the first lane of the whole vector.
isFirstLane()154 bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
155
156 /// Maps the lane to a cache index based on \p VF.
mapToCacheIndex(const ElementCount & VF)157 unsigned mapToCacheIndex(const ElementCount &VF) const {
158 switch (LaneKind) {
159 case VPLane::Kind::ScalableLast:
160 assert(VF.isScalable() && Lane < VF.getKnownMinValue());
161 return VF.getKnownMinValue() + Lane;
162 default:
163 assert(Lane < VF.getKnownMinValue());
164 return Lane;
165 }
166 }
167
168 /// Returns the maxmimum number of lanes that we are able to consider
169 /// caching for \p VF.
getNumCachedLanes(const ElementCount & VF)170 static unsigned getNumCachedLanes(const ElementCount &VF) {
171 return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
172 }
173 };
174
175 /// VPIteration represents a single point in the iteration space of the output
176 /// (vectorized and/or unrolled) IR loop.
177 struct VPIteration {
178 /// in [0..UF)
179 unsigned Part;
180
181 VPLane Lane;
182
183 VPIteration(unsigned Part, unsigned Lane,
184 VPLane::Kind Kind = VPLane::Kind::First)
PartVPIteration185 : Part(Part), Lane(Lane, Kind) {}
186
VPIterationVPIteration187 VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
188
isFirstIterationVPIteration189 bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
190 };
191
192 /// VPTransformState holds information passed down when "executing" a VPlan,
193 /// needed for generating the output IR.
194 struct VPTransformState {
VPTransformStateVPTransformState195 VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
196 DominatorTree *DT, IRBuilder<> &Builder,
197 InnerLoopVectorizer *ILV, VPlan *Plan)
198 : VF(VF), UF(UF), Instance(), LI(LI), DT(DT), Builder(Builder), ILV(ILV),
199 Plan(Plan) {}
200
201 /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
202 ElementCount VF;
203 unsigned UF;
204
205 /// Hold the indices to generate specific scalar instructions. Null indicates
206 /// that all instances are to be generated, using either scalar or vector
207 /// instructions.
208 Optional<VPIteration> Instance;
209
210 struct DataState {
211 /// A type for vectorized values in the new loop. Each value from the
212 /// original loop, when vectorized, is represented by UF vector values in
213 /// the new unrolled loop, where UF is the unroll factor.
214 typedef SmallVector<Value *, 2> PerPartValuesTy;
215
216 DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
217
218 using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
219 DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
220 } Data;
221
222 /// Get the generated Value for a given VPValue and a given Part. Note that
223 /// as some Defs are still created by ILV and managed in its ValueMap, this
224 /// method will delegate the call to ILV in such cases in order to provide
225 /// callers a consistent API.
226 /// \see set.
227 Value *get(VPValue *Def, unsigned Part);
228
229 /// Get the generated Value for a given VPValue and given Part and Lane.
230 Value *get(VPValue *Def, const VPIteration &Instance);
231
hasVectorValueVPTransformState232 bool hasVectorValue(VPValue *Def, unsigned Part) {
233 auto I = Data.PerPartOutput.find(Def);
234 return I != Data.PerPartOutput.end() && Part < I->second.size() &&
235 I->second[Part];
236 }
237
hasAnyVectorValueVPTransformState238 bool hasAnyVectorValue(VPValue *Def) const {
239 return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
240 }
241
hasScalarValueVPTransformState242 bool hasScalarValue(VPValue *Def, VPIteration Instance) {
243 auto I = Data.PerPartScalars.find(Def);
244 if (I == Data.PerPartScalars.end())
245 return false;
246 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
247 return Instance.Part < I->second.size() &&
248 CacheIdx < I->second[Instance.Part].size() &&
249 I->second[Instance.Part][CacheIdx];
250 }
251
252 /// Set the generated Value for a given VPValue and a given Part.
setVPTransformState253 void set(VPValue *Def, Value *V, unsigned Part) {
254 if (!Data.PerPartOutput.count(Def)) {
255 DataState::PerPartValuesTy Entry(UF);
256 Data.PerPartOutput[Def] = Entry;
257 }
258 Data.PerPartOutput[Def][Part] = V;
259 }
260 /// Reset an existing vector value for \p Def and a given \p Part.
resetVPTransformState261 void reset(VPValue *Def, Value *V, unsigned Part) {
262 auto Iter = Data.PerPartOutput.find(Def);
263 assert(Iter != Data.PerPartOutput.end() &&
264 "need to overwrite existing value");
265 Iter->second[Part] = V;
266 }
267
268 /// Set the generated scalar \p V for \p Def and the given \p Instance.
setVPTransformState269 void set(VPValue *Def, Value *V, const VPIteration &Instance) {
270 auto Iter = Data.PerPartScalars.insert({Def, {}});
271 auto &PerPartVec = Iter.first->second;
272 while (PerPartVec.size() <= Instance.Part)
273 PerPartVec.emplace_back();
274 auto &Scalars = PerPartVec[Instance.Part];
275 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
276 while (Scalars.size() <= CacheIdx)
277 Scalars.push_back(nullptr);
278 assert(!Scalars[CacheIdx] && "should overwrite existing value");
279 Scalars[CacheIdx] = V;
280 }
281
282 /// Reset an existing scalar value for \p Def and a given \p Instance.
resetVPTransformState283 void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
284 auto Iter = Data.PerPartScalars.find(Def);
285 assert(Iter != Data.PerPartScalars.end() &&
286 "need to overwrite existing value");
287 assert(Instance.Part < Iter->second.size() &&
288 "need to overwrite existing value");
289 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
290 assert(CacheIdx < Iter->second[Instance.Part].size() &&
291 "need to overwrite existing value");
292 Iter->second[Instance.Part][CacheIdx] = V;
293 }
294
295 /// Hold state information used when constructing the CFG of the output IR,
296 /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
297 struct CFGState {
298 /// The previous VPBasicBlock visited. Initially set to null.
299 VPBasicBlock *PrevVPBB = nullptr;
300
301 /// The previous IR BasicBlock created or used. Initially set to the new
302 /// header BasicBlock.
303 BasicBlock *PrevBB = nullptr;
304
305 /// The last IR BasicBlock in the output IR. Set to the new latch
306 /// BasicBlock, used for placing the newly created BasicBlocks.
307 BasicBlock *LastBB = nullptr;
308
309 /// The IR BasicBlock that is the preheader of the vector loop in the output
310 /// IR.
311 /// FIXME: The vector preheader should also be modeled in VPlan, so any code
312 /// that needs to be added to the preheader gets directly generated by
313 /// VPlan. There should be no need to manage a pointer to the IR BasicBlock.
314 BasicBlock *VectorPreHeader = nullptr;
315
316 /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
317 /// of replication, maps the BasicBlock of the last replica created.
318 SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
319
320 /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
321 /// up at the end of vector code generation.
322 SmallVector<VPBasicBlock *, 8> VPBBsToFix;
323
324 CFGState() = default;
325 } CFG;
326
327 /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
328 LoopInfo *LI;
329
330 /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
331 DominatorTree *DT;
332
333 /// Hold a reference to the IRBuilder used to generate output IR code.
334 IRBuilder<> &Builder;
335
336 VPValue2ValueTy VPValue2Value;
337
338 /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
339 Value *CanonicalIV = nullptr;
340
341 /// Hold the trip count of the scalar loop.
342 Value *TripCount = nullptr;
343
344 /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
345 InnerLoopVectorizer *ILV;
346
347 /// Pointer to the VPlan code is generated for.
348 VPlan *Plan;
349 };
350
351 /// VPUsers instance used by VPBlockBase to manage CondBit and the block
352 /// predicate. Currently VPBlockUsers are used in VPBlockBase for historical
353 /// reasons, but in the future the only VPUsers should either be recipes or
354 /// live-outs.VPBlockBase uses.
355 struct VPBlockUser : public VPUser {
VPBlockUserVPBlockUser356 VPBlockUser() : VPUser({}, VPUserID::Block) {}
357
getSingleOperandOrNullVPBlockUser358 VPValue *getSingleOperandOrNull() {
359 if (getNumOperands() == 1)
360 return getOperand(0);
361
362 return nullptr;
363 }
getSingleOperandOrNullVPBlockUser364 const VPValue *getSingleOperandOrNull() const {
365 if (getNumOperands() == 1)
366 return getOperand(0);
367
368 return nullptr;
369 }
370
resetSingleOpUserVPBlockUser371 void resetSingleOpUser(VPValue *NewVal) {
372 assert(getNumOperands() <= 1 && "Didn't expect more than one operand!");
373 if (!NewVal) {
374 if (getNumOperands() == 1)
375 removeLastOperand();
376 return;
377 }
378
379 if (getNumOperands() == 1)
380 setOperand(0, NewVal);
381 else
382 addOperand(NewVal);
383 }
384 };
385
386 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
387 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
388 class VPBlockBase {
389 friend class VPBlockUtils;
390
391 const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
392
393 /// An optional name for the block.
394 std::string Name;
395
396 /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
397 /// it is a topmost VPBlockBase.
398 VPRegionBlock *Parent = nullptr;
399
400 /// List of predecessor blocks.
401 SmallVector<VPBlockBase *, 1> Predecessors;
402
403 /// List of successor blocks.
404 SmallVector<VPBlockBase *, 1> Successors;
405
406 /// Successor selector managed by a VPUser. For blocks with zero or one
407 /// successors, there is no operand. Otherwise there is exactly one operand
408 /// which is the branch condition.
409 VPBlockUser CondBitUser;
410
411 /// If the block is predicated, its predicate is stored as an operand of this
412 /// VPUser to maintain the def-use relations. Otherwise there is no operand
413 /// here.
414 VPBlockUser PredicateUser;
415
416 /// VPlan containing the block. Can only be set on the entry block of the
417 /// plan.
418 VPlan *Plan = nullptr;
419
420 /// Add \p Successor as the last successor to this block.
appendSuccessor(VPBlockBase * Successor)421 void appendSuccessor(VPBlockBase *Successor) {
422 assert(Successor && "Cannot add nullptr successor!");
423 Successors.push_back(Successor);
424 }
425
426 /// Add \p Predecessor as the last predecessor to this block.
appendPredecessor(VPBlockBase * Predecessor)427 void appendPredecessor(VPBlockBase *Predecessor) {
428 assert(Predecessor && "Cannot add nullptr predecessor!");
429 Predecessors.push_back(Predecessor);
430 }
431
432 /// Remove \p Predecessor from the predecessors of this block.
removePredecessor(VPBlockBase * Predecessor)433 void removePredecessor(VPBlockBase *Predecessor) {
434 auto Pos = find(Predecessors, Predecessor);
435 assert(Pos && "Predecessor does not exist");
436 Predecessors.erase(Pos);
437 }
438
439 /// Remove \p Successor from the successors of this block.
removeSuccessor(VPBlockBase * Successor)440 void removeSuccessor(VPBlockBase *Successor) {
441 auto Pos = find(Successors, Successor);
442 assert(Pos && "Successor does not exist");
443 Successors.erase(Pos);
444 }
445
446 protected:
VPBlockBase(const unsigned char SC,const std::string & N)447 VPBlockBase(const unsigned char SC, const std::string &N)
448 : SubclassID(SC), Name(N) {}
449
450 public:
451 /// An enumeration for keeping track of the concrete subclass of VPBlockBase
452 /// that are actually instantiated. Values of this enumeration are kept in the
453 /// SubclassID field of the VPBlockBase objects. They are used for concrete
454 /// type identification.
455 using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
456
457 using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
458
459 virtual ~VPBlockBase() = default;
460
getName()461 const std::string &getName() const { return Name; }
462
setName(const Twine & newName)463 void setName(const Twine &newName) { Name = newName.str(); }
464
465 /// \return an ID for the concrete type of this object.
466 /// This is used to implement the classof checks. This should not be used
467 /// for any other purpose, as the values may change as LLVM evolves.
getVPBlockID()468 unsigned getVPBlockID() const { return SubclassID; }
469
getParent()470 VPRegionBlock *getParent() { return Parent; }
getParent()471 const VPRegionBlock *getParent() const { return Parent; }
472
473 /// \return A pointer to the plan containing the current block.
474 VPlan *getPlan();
475 const VPlan *getPlan() const;
476
477 /// Sets the pointer of the plan containing the block. The block must be the
478 /// entry block into the VPlan.
479 void setPlan(VPlan *ParentPlan);
480
setParent(VPRegionBlock * P)481 void setParent(VPRegionBlock *P) { Parent = P; }
482
483 /// \return the VPBasicBlock that is the entry of this VPBlockBase,
484 /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
485 /// VPBlockBase is a VPBasicBlock, it is returned.
486 const VPBasicBlock *getEntryBasicBlock() const;
487 VPBasicBlock *getEntryBasicBlock();
488
489 /// \return the VPBasicBlock that is the exit of this VPBlockBase,
490 /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
491 /// VPBlockBase is a VPBasicBlock, it is returned.
492 const VPBasicBlock *getExitBasicBlock() const;
493 VPBasicBlock *getExitBasicBlock();
494
getSuccessors()495 const VPBlocksTy &getSuccessors() const { return Successors; }
getSuccessors()496 VPBlocksTy &getSuccessors() { return Successors; }
497
getPredecessors()498 const VPBlocksTy &getPredecessors() const { return Predecessors; }
getPredecessors()499 VPBlocksTy &getPredecessors() { return Predecessors; }
500
501 /// \return the successor of this VPBlockBase if it has a single successor.
502 /// Otherwise return a null pointer.
getSingleSuccessor()503 VPBlockBase *getSingleSuccessor() const {
504 return (Successors.size() == 1 ? *Successors.begin() : nullptr);
505 }
506
507 /// \return the predecessor of this VPBlockBase if it has a single
508 /// predecessor. Otherwise return a null pointer.
getSinglePredecessor()509 VPBlockBase *getSinglePredecessor() const {
510 return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
511 }
512
getNumSuccessors()513 size_t getNumSuccessors() const { return Successors.size(); }
getNumPredecessors()514 size_t getNumPredecessors() const { return Predecessors.size(); }
515
516 /// An Enclosing Block of a block B is any block containing B, including B
517 /// itself. \return the closest enclosing block starting from "this", which
518 /// has successors. \return the root enclosing block if all enclosing blocks
519 /// have no successors.
520 VPBlockBase *getEnclosingBlockWithSuccessors();
521
522 /// \return the closest enclosing block starting from "this", which has
523 /// predecessors. \return the root enclosing block if all enclosing blocks
524 /// have no predecessors.
525 VPBlockBase *getEnclosingBlockWithPredecessors();
526
527 /// \return the successors either attached directly to this VPBlockBase or, if
528 /// this VPBlockBase is the exit block of a VPRegionBlock and has no
529 /// successors of its own, search recursively for the first enclosing
530 /// VPRegionBlock that has successors and return them. If no such
531 /// VPRegionBlock exists, return the (empty) successors of the topmost
532 /// VPBlockBase reached.
getHierarchicalSuccessors()533 const VPBlocksTy &getHierarchicalSuccessors() {
534 return getEnclosingBlockWithSuccessors()->getSuccessors();
535 }
536
537 /// \return the hierarchical successor of this VPBlockBase if it has a single
538 /// hierarchical successor. Otherwise return a null pointer.
getSingleHierarchicalSuccessor()539 VPBlockBase *getSingleHierarchicalSuccessor() {
540 return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
541 }
542
543 /// \return the predecessors either attached directly to this VPBlockBase or,
544 /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
545 /// predecessors of its own, search recursively for the first enclosing
546 /// VPRegionBlock that has predecessors and return them. If no such
547 /// VPRegionBlock exists, return the (empty) predecessors of the topmost
548 /// VPBlockBase reached.
getHierarchicalPredecessors()549 const VPBlocksTy &getHierarchicalPredecessors() {
550 return getEnclosingBlockWithPredecessors()->getPredecessors();
551 }
552
553 /// \return the hierarchical predecessor of this VPBlockBase if it has a
554 /// single hierarchical predecessor. Otherwise return a null pointer.
getSingleHierarchicalPredecessor()555 VPBlockBase *getSingleHierarchicalPredecessor() {
556 return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
557 }
558
559 /// \return the condition bit selecting the successor.
560 VPValue *getCondBit();
561 /// \return the condition bit selecting the successor.
562 const VPValue *getCondBit() const;
563 /// Set the condition bit selecting the successor.
564 void setCondBit(VPValue *CV);
565
566 /// \return the block's predicate.
567 VPValue *getPredicate();
568 /// \return the block's predicate.
569 const VPValue *getPredicate() const;
570 /// Set the block's predicate.
571 void setPredicate(VPValue *Pred);
572
573 /// Set a given VPBlockBase \p Successor as the single successor of this
574 /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
575 /// This VPBlockBase must have no successors.
setOneSuccessor(VPBlockBase * Successor)576 void setOneSuccessor(VPBlockBase *Successor) {
577 assert(Successors.empty() && "Setting one successor when others exist.");
578 appendSuccessor(Successor);
579 }
580
581 /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
582 /// successors of this VPBlockBase. \p Condition is set as the successor
583 /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
584 /// IfFalse. This VPBlockBase must have no successors.
setTwoSuccessors(VPBlockBase * IfTrue,VPBlockBase * IfFalse,VPValue * Condition)585 void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
586 VPValue *Condition) {
587 assert(Successors.empty() && "Setting two successors when others exist.");
588 assert(Condition && "Setting two successors without condition!");
589 setCondBit(Condition);
590 appendSuccessor(IfTrue);
591 appendSuccessor(IfFalse);
592 }
593
594 /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
595 /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
596 /// as successor of any VPBasicBlock in \p NewPreds.
setPredecessors(ArrayRef<VPBlockBase * > NewPreds)597 void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
598 assert(Predecessors.empty() && "Block predecessors already set.");
599 for (auto *Pred : NewPreds)
600 appendPredecessor(Pred);
601 }
602
603 /// Remove all the predecessor of this block.
clearPredecessors()604 void clearPredecessors() { Predecessors.clear(); }
605
606 /// Remove all the successors of this block and set to null its condition bit
clearSuccessors()607 void clearSuccessors() {
608 Successors.clear();
609 setCondBit(nullptr);
610 }
611
612 /// The method which generates the output IR that correspond to this
613 /// VPBlockBase, thereby "executing" the VPlan.
614 virtual void execute(struct VPTransformState *State) = 0;
615
616 /// Delete all blocks reachable from a given VPBlockBase, inclusive.
617 static void deleteCFG(VPBlockBase *Entry);
618
619 /// Return true if it is legal to hoist instructions into this block.
isLegalToHoistInto()620 bool isLegalToHoistInto() {
621 // There are currently no constraints that prevent an instruction to be
622 // hoisted into a VPBlockBase.
623 return true;
624 }
625
626 /// Replace all operands of VPUsers in the block with \p NewValue and also
627 /// replaces all uses of VPValues defined in the block with NewValue.
628 virtual void dropAllReferences(VPValue *NewValue) = 0;
629
630 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
printAsOperand(raw_ostream & OS,bool PrintType)631 void printAsOperand(raw_ostream &OS, bool PrintType) const {
632 OS << getName();
633 }
634
635 /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
636 /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
637 /// consequtive numbers.
638 ///
639 /// Note that the numbering is applied to the whole VPlan, so printing
640 /// individual blocks is consistent with the whole VPlan printing.
641 virtual void print(raw_ostream &O, const Twine &Indent,
642 VPSlotTracker &SlotTracker) const = 0;
643
644 /// Print plain-text dump of this VPlan to \p O.
print(raw_ostream & O)645 void print(raw_ostream &O) const {
646 VPSlotTracker SlotTracker(getPlan());
647 print(O, "", SlotTracker);
648 }
649
650 /// Print the successors of this block to \p O, prefixing all lines with \p
651 /// Indent.
652 void printSuccessors(raw_ostream &O, const Twine &Indent) const;
653
654 /// Dump this VPBlockBase to dbgs().
dump()655 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
656 #endif
657 };
658
659 /// VPRecipeBase is a base class modeling a sequence of one or more output IR
660 /// instructions. VPRecipeBase owns the the VPValues it defines through VPDef
661 /// and is responsible for deleting its defined values. Single-value
662 /// VPRecipeBases that also inherit from VPValue must make sure to inherit from
663 /// VPRecipeBase before VPValue.
664 class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
665 public VPDef,
666 public VPUser {
667 friend VPBasicBlock;
668 friend class VPBlockUtils;
669
670 /// Each VPRecipe belongs to a single VPBasicBlock.
671 VPBasicBlock *Parent = nullptr;
672
673 public:
VPRecipeBase(const unsigned char SC,ArrayRef<VPValue * > Operands)674 VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
675 : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
676
677 template <typename IterT>
VPRecipeBase(const unsigned char SC,iterator_range<IterT> Operands)678 VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
679 : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
680 virtual ~VPRecipeBase() = default;
681
682 /// \return the VPBasicBlock which this VPRecipe belongs to.
getParent()683 VPBasicBlock *getParent() { return Parent; }
getParent()684 const VPBasicBlock *getParent() const { return Parent; }
685
686 /// The method which generates the output IR instructions that correspond to
687 /// this VPRecipe, thereby "executing" the VPlan.
688 virtual void execute(struct VPTransformState &State) = 0;
689
690 /// Insert an unlinked recipe into a basic block immediately before
691 /// the specified recipe.
692 void insertBefore(VPRecipeBase *InsertPos);
693
694 /// Insert an unlinked Recipe into a basic block immediately after
695 /// the specified Recipe.
696 void insertAfter(VPRecipeBase *InsertPos);
697
698 /// Unlink this recipe from its current VPBasicBlock and insert it into
699 /// the VPBasicBlock that MovePos lives in, right after MovePos.
700 void moveAfter(VPRecipeBase *MovePos);
701
702 /// Unlink this recipe and insert into BB before I.
703 ///
704 /// \pre I is a valid iterator into BB.
705 void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);
706
707 /// This method unlinks 'this' from the containing basic block, but does not
708 /// delete it.
709 void removeFromParent();
710
711 /// This method unlinks 'this' from the containing basic block and deletes it.
712 ///
713 /// \returns an iterator pointing to the element after the erased one
714 iplist<VPRecipeBase>::iterator eraseFromParent();
715
716 /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
717 /// otherwise.
getUnderlyingInstr()718 Instruction *getUnderlyingInstr() {
719 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
720 }
getUnderlyingInstr()721 const Instruction *getUnderlyingInstr() const {
722 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
723 }
724
725 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)726 static inline bool classof(const VPDef *D) {
727 // All VPDefs are also VPRecipeBases.
728 return true;
729 }
730
classof(const VPUser * U)731 static inline bool classof(const VPUser *U) {
732 return U->getVPUserID() == VPUser::VPUserID::Recipe;
733 }
734
735 /// Returns true if the recipe may have side-effects.
736 bool mayHaveSideEffects() const;
737
738 /// Returns true for PHI-like recipes.
isPhi()739 bool isPhi() const {
740 return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
741 }
742
743 /// Returns true if the recipe may read from memory.
744 bool mayReadFromMemory() const;
745
746 /// Returns true if the recipe may write to memory.
747 bool mayWriteToMemory() const;
748
749 /// Returns true if the recipe may read from or write to memory.
mayReadOrWriteMemory()750 bool mayReadOrWriteMemory() const {
751 return mayReadFromMemory() || mayWriteToMemory();
752 }
753 };
754
classof(const VPDef * Def)755 inline bool VPUser::classof(const VPDef *Def) {
756 return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
757 Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
758 Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
759 Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
760 Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
761 Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
762 Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
763 Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
764 Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
765 Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
766 Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
767 }
768
/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While as any Recipe it may generate a sequence of IR instructions when
/// executed, these instructions would always form a single-def expression as
/// the VPInstruction is also a single def-use vertex.
773 class VPInstruction : public VPRecipeBase, public VPValue {
774 friend class VPlanSlp;
775
776 public:
777 /// VPlan opcodes, extending LLVM IR with idiomatics instructions.
778 enum {
779 FirstOrderRecurrenceSplice =
780 Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
781 // values of a first-order recurrence.
782 Not,
783 ICmpULE,
784 SLPLoad,
785 SLPStore,
786 ActiveLaneMask,
787 };
788
789 private:
790 typedef unsigned char OpcodeTy;
791 OpcodeTy Opcode;
792
793 /// Utility method serving execute(): generates a single instance of the
794 /// modeled instruction.
795 void generateInstruction(VPTransformState &State, unsigned Part);
796
797 protected:
setUnderlyingInstr(Instruction * I)798 void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }
799
800 public:
VPInstruction(unsigned Opcode,ArrayRef<VPValue * > Operands)801 VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands)
802 : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
803 VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {}
804
VPInstruction(unsigned Opcode,ArrayRef<VPInstruction * > Operands)805 VPInstruction(unsigned Opcode, ArrayRef<VPInstruction *> Operands)
806 : VPRecipeBase(VPRecipeBase::VPInstructionSC, {}),
807 VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {
808 for (auto *I : Operands)
809 addOperand(I->getVPSingleValue());
810 }
811
VPInstruction(unsigned Opcode,std::initializer_list<VPValue * > Operands)812 VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands)
813 : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands)) {}
814
815 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPValue * V)816 static inline bool classof(const VPValue *V) {
817 return V->getVPValueID() == VPValue::VPVInstructionSC;
818 }
819
clone()820 VPInstruction *clone() const {
821 SmallVector<VPValue *, 2> Operands(operands());
822 return new VPInstruction(Opcode, Operands);
823 }
824
825 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * R)826 static inline bool classof(const VPDef *R) {
827 return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
828 }
829
getOpcode()830 unsigned getOpcode() const { return Opcode; }
831
832 /// Generate the instruction.
833 /// TODO: We currently execute only per-part unless a specific instance is
834 /// provided.
835 void execute(VPTransformState &State) override;
836
837 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
838 /// Print the VPInstruction to \p O.
839 void print(raw_ostream &O, const Twine &Indent,
840 VPSlotTracker &SlotTracker) const override;
841
842 /// Print the VPInstruction to dbgs() (for debugging).
843 LLVM_DUMP_METHOD void dump() const;
844 #endif
845
846 /// Return true if this instruction may modify memory.
mayWriteToMemory()847 bool mayWriteToMemory() const {
848 // TODO: we can use attributes of the called function to rule out memory
849 // modifications.
850 return Opcode == Instruction::Store || Opcode == Instruction::Call ||
851 Opcode == Instruction::Invoke || Opcode == SLPStore;
852 }
853
hasResult()854 bool hasResult() const {
855 // CallInst may or may not have a result, depending on the called function.
856 // Conservatively return calls have results for now.
857 switch (getOpcode()) {
858 case Instruction::Ret:
859 case Instruction::Br:
860 case Instruction::Store:
861 case Instruction::Switch:
862 case Instruction::IndirectBr:
863 case Instruction::Resume:
864 case Instruction::CatchRet:
865 case Instruction::Unreachable:
866 case Instruction::Fence:
867 case Instruction::AtomicRMW:
868 return false;
869 default:
870 return true;
871 }
872 }
873 };
874
875 /// VPWidenRecipe is a recipe for producing a copy of vector type its
876 /// ingredient. This recipe covers most of the traditional vectorization cases
877 /// where each ingredient transforms into a vectorized version of itself.
878 class VPWidenRecipe : public VPRecipeBase, public VPValue {
879 public:
880 template <typename IterT>
VPWidenRecipe(Instruction & I,iterator_range<IterT> Operands)881 VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
882 : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
883 VPValue(VPValue::VPVWidenSC, &I, this) {}
884
885 ~VPWidenRecipe() override = default;
886
887 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)888 static inline bool classof(const VPDef *D) {
889 return D->getVPDefID() == VPRecipeBase::VPWidenSC;
890 }
classof(const VPValue * V)891 static inline bool classof(const VPValue *V) {
892 return V->getVPValueID() == VPValue::VPVWidenSC;
893 }
894
895 /// Produce widened copies of all Ingredients.
896 void execute(VPTransformState &State) override;
897
898 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
899 /// Print the recipe.
900 void print(raw_ostream &O, const Twine &Indent,
901 VPSlotTracker &SlotTracker) const override;
902 #endif
903 };
904
905 /// A recipe for widening Call instructions.
906 class VPWidenCallRecipe : public VPRecipeBase, public VPValue {
907
908 public:
909 template <typename IterT>
VPWidenCallRecipe(CallInst & I,iterator_range<IterT> CallArguments)910 VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
911 : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
912 VPValue(VPValue::VPVWidenCallSC, &I, this) {}
913
914 ~VPWidenCallRecipe() override = default;
915
916 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)917 static inline bool classof(const VPDef *D) {
918 return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
919 }
920
921 /// Produce a widened version of the call instruction.
922 void execute(VPTransformState &State) override;
923
924 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
925 /// Print the recipe.
926 void print(raw_ostream &O, const Twine &Indent,
927 VPSlotTracker &SlotTracker) const override;
928 #endif
929 };
930
931 /// A recipe for widening select instructions.
932 class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {
933
934 /// Is the condition of the select loop invariant?
935 bool InvariantCond;
936
937 public:
938 template <typename IterT>
VPWidenSelectRecipe(SelectInst & I,iterator_range<IterT> Operands,bool InvariantCond)939 VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
940 bool InvariantCond)
941 : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
942 VPValue(VPValue::VPVWidenSelectSC, &I, this),
943 InvariantCond(InvariantCond) {}
944
945 ~VPWidenSelectRecipe() override = default;
946
947 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)948 static inline bool classof(const VPDef *D) {
949 return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
950 }
951
952 /// Produce a widened version of the select instruction.
953 void execute(VPTransformState &State) override;
954
955 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
956 /// Print the recipe.
957 void print(raw_ostream &O, const Twine &Indent,
958 VPSlotTracker &SlotTracker) const override;
959 #endif
960 };
961
962 /// A recipe for handling GEP instructions.
963 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
964 bool IsPtrLoopInvariant;
965 SmallBitVector IsIndexLoopInvariant;
966
967 public:
968 template <typename IterT>
VPWidenGEPRecipe(GetElementPtrInst * GEP,iterator_range<IterT> Operands)969 VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
970 : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
971 VPValue(VPWidenGEPSC, GEP, this),
972 IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
973
974 template <typename IterT>
VPWidenGEPRecipe(GetElementPtrInst * GEP,iterator_range<IterT> Operands,Loop * OrigLoop)975 VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
976 Loop *OrigLoop)
977 : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
978 VPValue(VPValue::VPVWidenGEPSC, GEP, this),
979 IsIndexLoopInvariant(GEP->getNumIndices(), false) {
980 IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
981 for (auto Index : enumerate(GEP->indices()))
982 IsIndexLoopInvariant[Index.index()] =
983 OrigLoop->isLoopInvariant(Index.value().get());
984 }
985 ~VPWidenGEPRecipe() override = default;
986
987 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)988 static inline bool classof(const VPDef *D) {
989 return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
990 }
991
992 /// Generate the gep nodes.
993 void execute(VPTransformState &State) override;
994
995 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
996 /// Print the recipe.
997 void print(raw_ostream &O, const Twine &Indent,
998 VPSlotTracker &SlotTracker) const override;
999 #endif
1000 };
1001
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector and scalar values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase {
  /// The original induction phi in the scalar loop.
  PHINode *IV;

public:
  /// Create a recipe for \p IV with start value \p Start. The first value the
  /// recipe defines is tied to \p Trunc when a truncation is requested and to
  /// \p IV otherwise; if \p Cast is non-null a second value is defined for it.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, Instruction *Cast,
                                TruncInst *Trunc = nullptr)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), IV(IV) {
    // First defined value: the induction itself (trunc or phi).
    if (Trunc)
      new VPValue(Trunc, this);
    else
      new VPValue(IV, this);

    // Optional second defined value: the induction cast.
    if (Cast)
      new VPValue(Cast, this);
  }
  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }

  /// Returns the cast VPValue, if one is attached, or nullptr otherwise.
  VPValue *getCastValue() {
    // The constructor defines exactly two values iff a cast was given.
    if (getNumDefinedValues() != 2)
      return nullptr;
    return getVPValue(1);
  }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
};
1055
/// A recipe for handling first order recurrences and pointer inductions. For
/// first-order recurrences, the start value is the first operand of the recipe
/// and the incoming value from the backedge is the second operand. It also
/// serves as base class for VPReductionPHIRecipe. In the VPlan native path, all
/// incoming VPValues & VPBasicBlock pairs are managed in the recipe directly.
class VPWidenPHIRecipe : public VPRecipeBase, public VPValue {
  /// List of incoming blocks. Only used in the VPlan native path.
  SmallVector<VPBasicBlock *, 2> IncomingBlocks;

protected:
  /// Create a recipe for \p Phi with the given def/value IDs; \p Start, when
  /// non-null, becomes operand 0.
  VPWidenPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
                   VPValue *Start = nullptr)
      : VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
    if (Start)
      addOperand(Start);
  }

public:
  /// Create a VPWidenPHIRecipe for \p Phi
  VPWidenPHIRecipe(PHINode *Phi)
      : VPWidenPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {}

  /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
  VPWidenPHIRecipe(PHINode *Phi, VPValue &Start) : VPWidenPHIRecipe(Phi) {
    addOperand(&Start);
  }

  ~VPWidenPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast. Also
  /// matches the derived recurrence and reduction phi recipes.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPWidenPHISC ||
           B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
           B->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPHISC ||
           V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
           V->getVPValueID() == VPValue::VPVReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the phi, if it is a reduction or first-order
  /// recurrence.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge, if it is a reduction or
  /// first-order recurrence.
  // NOTE(review): reads operand 1 unconditionally — callers must ensure the
  // backedge operand has been added.
  VPValue *getBackedgeValue() {
    return getOperand(1);
  }

  /// Returns the backedge value as a recipe. The backedge value is guaranteed
  /// to be a recipe.
  VPRecipeBase *getBackedgeRecipe() {
    return cast<VPRecipeBase>(getBackedgeValue()->getDef());
  }

  /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi: the value is
  /// appended as an operand and the block is recorded in IncomingBlocks.
  /// Only used in the VPlan native path.
  void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
    addOperand(IncomingV);
    IncomingBlocks.push_back(IncomingBlock);
  }

  /// Returns the \p I th incoming VPValue.
  VPValue *getIncomingValue(unsigned I) { return getOperand(I); }

  /// Returns the \p I th incoming VPBasicBlock.
  VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
};
1136
1137 /// A recipe for handling first-order recurrence phis. The start value is the
1138 /// first operand of the recipe and the incoming value from the backedge is the
1139 /// second operand.
1140 struct VPFirstOrderRecurrencePHIRecipe : public VPWidenPHIRecipe {
VPFirstOrderRecurrencePHIRecipeVPFirstOrderRecurrencePHIRecipe1141 VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
1142 : VPWidenPHIRecipe(VPVFirstOrderRecurrencePHISC,
1143 VPFirstOrderRecurrencePHISC, Phi, &Start) {}
1144
1145 /// Method to support type inquiry through isa, cast, and dyn_cast.
classofVPFirstOrderRecurrencePHIRecipe1146 static inline bool classof(const VPRecipeBase *R) {
1147 return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
1148 }
classofVPFirstOrderRecurrencePHIRecipe1149 static inline bool classof(const VPWidenPHIRecipe *D) {
1150 return D->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
1151 }
classofVPFirstOrderRecurrencePHIRecipe1152 static inline bool classof(const VPValue *V) {
1153 return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
1154 }
1155
1156 void execute(VPTransformState &State) override;
1157
1158 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1159 /// Print the recipe.
1160 void print(raw_ostream &O, const Twine &Indent,
1161 VPSlotTracker &SlotTracker) const override;
1162 #endif
1163 };
1164
1165 /// A recipe for handling reduction phis. The start value is the first operand
1166 /// of the recipe and the incoming value from the backedge is the second
1167 /// operand.
1168 class VPReductionPHIRecipe : public VPWidenPHIRecipe {
1169 /// Descriptor for the reduction.
1170 RecurrenceDescriptor &RdxDesc;
1171
1172 /// The phi is part of an in-loop reduction.
1173 bool IsInLoop;
1174
1175 /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
1176 bool IsOrdered;
1177
1178 public:
1179 /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
1180 /// RdxDesc.
1181 VPReductionPHIRecipe(PHINode *Phi, RecurrenceDescriptor &RdxDesc,
1182 VPValue &Start, bool IsInLoop = false,
1183 bool IsOrdered = false)
1184 : VPWidenPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
1185 RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
1186 assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
1187 }
1188
1189 ~VPReductionPHIRecipe() override = default;
1190
1191 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPRecipeBase * R)1192 static inline bool classof(const VPRecipeBase *R) {
1193 return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
1194 }
classof(const VPValue * V)1195 static inline bool classof(const VPValue *V) {
1196 return V->getVPValueID() == VPValue::VPVReductionPHISC;
1197 }
classof(const VPWidenPHIRecipe * R)1198 static inline bool classof(const VPWidenPHIRecipe *R) {
1199 return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
1200 }
1201
1202 /// Generate the phi/select nodes.
1203 void execute(VPTransformState &State) override;
1204
1205 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1206 /// Print the recipe.
1207 void print(raw_ostream &O, const Twine &Indent,
1208 VPSlotTracker &SlotTracker) const override;
1209 #endif
1210
getRecurrenceDescriptor()1211 RecurrenceDescriptor &getRecurrenceDescriptor() { return RdxDesc; }
1212
1213 /// Returns true, if the phi is part of an ordered reduction.
isOrdered()1214 bool isOrdered() const { return IsOrdered; }
1215
1216 /// Returns true, if the phi is part of an in-loop reduction.
isInLoop()1217 bool isInLoop() const { return IsInLoop; }
1218 };
1219
1220 /// A recipe for vectorizing a phi-node as a sequence of mask-based select
1221 /// instructions.
1222 class VPBlendRecipe : public VPRecipeBase, public VPValue {
1223 PHINode *Phi;
1224
1225 public:
1226 /// The blend operation is a User of the incoming values and of their
1227 /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
1228 /// might be incoming with a full mask for which there is no VPValue.
VPBlendRecipe(PHINode * Phi,ArrayRef<VPValue * > Operands)1229 VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
1230 : VPRecipeBase(VPBlendSC, Operands),
1231 VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
1232 assert(Operands.size() > 0 &&
1233 ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
1234 "Expected either a single incoming value or a positive even number "
1235 "of operands");
1236 }
1237
1238 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1239 static inline bool classof(const VPDef *D) {
1240 return D->getVPDefID() == VPRecipeBase::VPBlendSC;
1241 }
1242
1243 /// Return the number of incoming values, taking into account that a single
1244 /// incoming value has no mask.
getNumIncomingValues()1245 unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }
1246
1247 /// Return incoming value number \p Idx.
getIncomingValue(unsigned Idx)1248 VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }
1249
1250 /// Return mask number \p Idx.
getMask(unsigned Idx)1251 VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }
1252
1253 /// Generate the phi/select nodes.
1254 void execute(VPTransformState &State) override;
1255
1256 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1257 /// Print the recipe.
1258 void print(raw_ostream &O, const Twine &Indent,
1259 VPSlotTracker &SlotTracker) const override;
1260 #endif
1261 };
1262
/// VPInterleaveRecipe is a recipe for transforming an interleave group of load
/// or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase {
  /// The interleave group being vectorized.
  const InterleaveGroup<Instruction> *IG;

  /// True iff an (optional) mask was appended as the last operand.
  bool HasMask = false;

public:
  /// Create a recipe for \p IG accessing \p Addr. One VPValue is defined per
  /// non-void member of the group; \p StoredValues and, when non-null,
  /// \p Mask are appended as operands after the address.
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
    // Define one value per member that produces a result; void-typed members
    // define nothing.
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last, currently 2nd operand.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask; slice off the address and, if present, the mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumOperands() - (HasMask ? 2 : 1));
  }

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Return the interleave group this recipe vectorizes.
  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }
};
1329
1330 /// A recipe to represent inloop reduction operations, performing a reduction on
1331 /// a vector operand into a scalar value, and adding the result to a chain.
1332 /// The Operands are {ChainOp, VecOp, [Condition]}.
1333 class VPReductionRecipe : public VPRecipeBase, public VPValue {
1334 /// The recurrence decriptor for the reduction in question.
1335 RecurrenceDescriptor *RdxDesc;
1336 /// Pointer to the TTI, needed to create the target reduction
1337 const TargetTransformInfo *TTI;
1338
1339 public:
VPReductionRecipe(RecurrenceDescriptor * R,Instruction * I,VPValue * ChainOp,VPValue * VecOp,VPValue * CondOp,const TargetTransformInfo * TTI)1340 VPReductionRecipe(RecurrenceDescriptor *R, Instruction *I, VPValue *ChainOp,
1341 VPValue *VecOp, VPValue *CondOp,
1342 const TargetTransformInfo *TTI)
1343 : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
1344 VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
1345 if (CondOp)
1346 addOperand(CondOp);
1347 }
1348
1349 ~VPReductionRecipe() override = default;
1350
1351 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPValue * V)1352 static inline bool classof(const VPValue *V) {
1353 return V->getVPValueID() == VPValue::VPVReductionSC;
1354 }
1355
1356 /// Generate the reduction in the loop
1357 void execute(VPTransformState &State) override;
1358
1359 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1360 /// Print the recipe.
1361 void print(raw_ostream &O, const Twine &Indent,
1362 VPSlotTracker &SlotTracker) const override;
1363 #endif
1364
1365 /// The VPValue of the scalar Chain being accumulated.
getChainOp()1366 VPValue *getChainOp() const { return getOperand(0); }
1367 /// The VPValue of the vector value to be reduced.
getVecOp()1368 VPValue *getVecOp() const { return getOperand(1); }
1369 /// The VPValue of the condition for the block.
getCondOp()1370 VPValue *getCondOp() const {
1371 return getNumOperands() > 2 ? getOperand(2) : nullptr;
1372 }
1373 };
1374
1375 /// VPReplicateRecipe replicates a given instruction producing multiple scalar
1376 /// copies of the original scalar type, one per lane, instead of producing a
1377 /// single copy of widened type for all lanes. If the instruction is known to be
1378 /// uniform only one copy, per lane zero, will be generated.
1379 class VPReplicateRecipe : public VPRecipeBase, public VPValue {
1380 /// Indicator if only a single replica per lane is needed.
1381 bool IsUniform;
1382
1383 /// Indicator if the replicas are also predicated.
1384 bool IsPredicated;
1385
1386 /// Indicator if the scalar values should also be packed into a vector.
1387 bool AlsoPack;
1388
1389 public:
1390 template <typename IterT>
1391 VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
1392 bool IsUniform, bool IsPredicated = false)
VPRecipeBase(VPReplicateSC,Operands)1393 : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
1394 IsUniform(IsUniform), IsPredicated(IsPredicated) {
1395 // Retain the previous behavior of predicateInstructions(), where an
1396 // insert-element of a predicated instruction got hoisted into the
1397 // predicated basic block iff it was its only user. This is achieved by
1398 // having predicated instructions also pack their values into a vector by
1399 // default unless they have a replicated user which uses their scalar value.
1400 AlsoPack = IsPredicated && !I->use_empty();
1401 }
1402
1403 ~VPReplicateRecipe() override = default;
1404
1405 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1406 static inline bool classof(const VPDef *D) {
1407 return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
1408 }
1409
classof(const VPValue * V)1410 static inline bool classof(const VPValue *V) {
1411 return V->getVPValueID() == VPValue::VPVReplicateSC;
1412 }
1413
1414 /// Generate replicas of the desired Ingredient. Replicas will be generated
1415 /// for all parts and lanes unless a specific part and lane are specified in
1416 /// the \p State.
1417 void execute(VPTransformState &State) override;
1418
setAlsoPack(bool Pack)1419 void setAlsoPack(bool Pack) { AlsoPack = Pack; }
1420
1421 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1422 /// Print the recipe.
1423 void print(raw_ostream &O, const Twine &Indent,
1424 VPSlotTracker &SlotTracker) const override;
1425 #endif
1426
isUniform()1427 bool isUniform() const { return IsUniform; }
1428
isPacked()1429 bool isPacked() const { return AlsoPack; }
1430
isPredicated()1431 bool isPredicated() const { return IsPredicated; }
1432 };
1433
1434 /// A recipe for generating conditional branches on the bits of a mask.
1435 class VPBranchOnMaskRecipe : public VPRecipeBase {
1436 public:
VPBranchOnMaskRecipe(VPValue * BlockInMask)1437 VPBranchOnMaskRecipe(VPValue *BlockInMask)
1438 : VPRecipeBase(VPBranchOnMaskSC, {}) {
1439 if (BlockInMask) // nullptr means all-one mask.
1440 addOperand(BlockInMask);
1441 }
1442
1443 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1444 static inline bool classof(const VPDef *D) {
1445 return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
1446 }
1447
1448 /// Generate the extraction of the appropriate bit from the block mask and the
1449 /// conditional branch.
1450 void execute(VPTransformState &State) override;
1451
1452 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1453 /// Print the recipe.
print(raw_ostream & O,const Twine & Indent,VPSlotTracker & SlotTracker)1454 void print(raw_ostream &O, const Twine &Indent,
1455 VPSlotTracker &SlotTracker) const override {
1456 O << Indent << "BRANCH-ON-MASK ";
1457 if (VPValue *Mask = getMask())
1458 Mask->printAsOperand(O, SlotTracker);
1459 else
1460 O << " All-One";
1461 }
1462 #endif
1463
1464 /// Return the mask used by this recipe. Note that a full mask is represented
1465 /// by a nullptr.
getMask()1466 VPValue *getMask() const {
1467 assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
1468 // Mask is optional.
1469 return getNumOperands() == 1 ? getOperand(0) : nullptr;
1470 }
1471 };
1472
1473 /// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
1474 /// control converges back from a Branch-on-Mask. The phi nodes are needed in
1475 /// order to merge values that are set under such a branch and feed their uses.
1476 /// The phi nodes can be scalar or vector depending on the users of the value.
1477 /// This recipe works in concert with VPBranchOnMaskRecipe.
1478 class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
1479 public:
1480 /// Construct a VPPredInstPHIRecipe given \p PredInst whose value needs a phi
1481 /// nodes after merging back from a Branch-on-Mask.
VPPredInstPHIRecipe(VPValue * PredV)1482 VPPredInstPHIRecipe(VPValue *PredV)
1483 : VPRecipeBase(VPPredInstPHISC, PredV),
1484 VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
1485 ~VPPredInstPHIRecipe() override = default;
1486
1487 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1488 static inline bool classof(const VPDef *D) {
1489 return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
1490 }
1491
1492 /// Generates phi nodes for live-outs as needed to retain SSA form.
1493 void execute(VPTransformState &State) override;
1494
1495 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1496 /// Print the recipe.
1497 void print(raw_ostream &O, const Twine &Indent,
1498 VPSlotTracker &SlotTracker) const override;
1499 #endif
1500 };
1501
/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase {
  /// The scalar load or store being widened.
  Instruction &Ingredient;

  /// Append \p Mask as the last operand, if non-null. A null mask stands for
  /// an all-true mask and is not stored as an operand.
  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  /// Returns true if a mask operand was appended. Operands are
  /// {Addr[, StoredValue][, Mask]}, so a masked load has 2 operands and a
  /// masked store has 3.
  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }

public:
  /// Create a recipe widening \p Load from \p Addr, optionally predicated by
  /// \p Mask. Also defines the VPValue for the loaded result.
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}), Ingredient(Load) {
    new VPValue(VPValue::VPVMemoryInstructionSC, &Load, this);
    setMask(Mask);
  }

  /// Create a recipe widening the store of \p StoredValue to \p Addr,
  /// optionally predicated by \p Mask. Stores define no VPValue.
  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
        Ingredient(Store) {
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe; only valid for stores.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1570
1571 /// A Recipe for widening the canonical induction variable of the vector loop.
1572 class VPWidenCanonicalIVRecipe : public VPRecipeBase {
1573 public:
VPWidenCanonicalIVRecipe()1574 VPWidenCanonicalIVRecipe() : VPRecipeBase(VPWidenCanonicalIVSC, {}) {
1575 new VPValue(nullptr, this);
1576 }
1577
1578 ~VPWidenCanonicalIVRecipe() override = default;
1579
1580 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)1581 static inline bool classof(const VPDef *D) {
1582 return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
1583 }
1584
1585 /// Generate a canonical vector induction variable of the vector loop, with
1586 /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
1587 /// step = <VF*UF, VF*UF, ..., VF*UF>.
1588 void execute(VPTransformState &State) override;
1589
1590 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1591 /// Print the recipe.
1592 void print(raw_ostream &O, const Twine &Indent,
1593 VPSlotTracker &SlotTracker) const override;
1594 #endif
1595 };
1596
/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
/// holds a sequence of zero or more VPRecipe's each representing a sequence of
/// output IR instructions. All PHI-like recipes must come before any non-PHI
/// recipes.
class VPBasicBlock : public VPBlockBase {
public:
  using RecipeListTy = iplist<VPRecipeBase>;

private:
  /// The VPRecipes held in the order of output instructions to generate.
  RecipeListTy Recipes;

public:
  VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
      : VPBlockBase(VPBasicBlockSC, Name.str()) {
    if (Recipe)
      appendRecipe(Recipe);
  }

  ~VPBasicBlock() override {
    // The recipes are owned by the intrusive list; pop them all off.
    while (!Recipes.empty())
      Recipes.pop_back();
  }

  /// Instruction iterators...
  using iterator = RecipeListTy::iterator;
  using const_iterator = RecipeListTy::const_iterator;
  using reverse_iterator = RecipeListTy::reverse_iterator;
  using const_reverse_iterator = RecipeListTy::const_reverse_iterator;

  //===--------------------------------------------------------------------===//
  /// Recipe iterator methods
  ///
  inline iterator begin() { return Recipes.begin(); }
  inline const_iterator begin() const { return Recipes.begin(); }
  inline iterator end() { return Recipes.end(); }
  inline const_iterator end() const { return Recipes.end(); }

  inline reverse_iterator rbegin() { return Recipes.rbegin(); }
  inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
  inline reverse_iterator rend() { return Recipes.rend(); }
  inline const_reverse_iterator rend() const { return Recipes.rend(); }

  inline size_t size() const { return Recipes.size(); }
  inline bool empty() const { return Recipes.empty(); }
  inline const VPRecipeBase &front() const { return Recipes.front(); }
  inline VPRecipeBase &front() { return Recipes.front(); }
  inline const VPRecipeBase &back() const { return Recipes.back(); }
  inline VPRecipeBase &back() { return Recipes.back(); }

  /// Returns a reference to the list of recipes.
  RecipeListTy &getRecipeList() { return Recipes; }

  /// Returns a pointer to a member of the recipe list.
  static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
    return &VPBasicBlock::Recipes;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
  }

  /// Insert \p Recipe before \p InsertPt, setting this block as its parent.
  /// \p Recipe must not already belong to a block.
  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }

  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }

  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  /// Returns an iterator range over the PHI-like recipes in the block.
  iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPhi());
  }

  void dropAllReferences(VPValue *NewValue) override;

  /// Split current block at \p SplitAt by inserting a new block between the
  /// current block and its successors and moving all recipes starting at
  /// SplitAt to the new block. Returns the new block.
  VPBasicBlock *splitAt(iterator SplitAt);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
  /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif

private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
};
1705
/// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
/// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
/// A VPRegionBlock may indicate that its contents are to be replicated several
/// times. This is designed to support predicated scalarization, in which a
/// scalar if-then code structure needs to be generated VF * UF times. Having
/// this replication indicator helps to keep a single model for multiple
/// candidate VF's. The actual replication takes place only once the desired VF
/// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Exit;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exit->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exit->setParent(this);
  }
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
        IsReplicator(IsReplicator) {}

  ~VPRegionBlock() override {
    if (Entry) {
      // Redirect all references into the region to a dummy value before
      // deleting the contained CFG.
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraits::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExit() const { return Exit; }
  VPBlockBase *getExit() { return Exit; }

  /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
  /// ExitBlock must have no successors.
  void setExit(VPBlockBase *ExitBlock) {
    assert(ExitBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exit = ExitBlock;
    ExitBlock->setParent(this);
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  void dropAllReferences(VPValue *NewValue) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
  /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual regions is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif
};
1804
1805 //===----------------------------------------------------------------------===//
1806 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs //
1807 //===----------------------------------------------------------------------===//
1808
1809 // The following set of template specializations implement GraphTraits to treat
1810 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
// that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
1812 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
1813 // successors/predecessors but not to the blocks inside the region.
1814
/// GraphTraits specialization treating a VPBlockBase as a graph node whose
/// children are its direct successors. It does not descend into the contents
/// of VPRegionBlocks.
template <> struct GraphTraits<VPBlockBase *> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
1829
/// GraphTraits specialization treating a const VPBlockBase as a graph node
/// whose children are its direct successors.
template <> struct GraphTraits<const VPBlockBase *> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
1844
// Inverse order specialization for VPBlockBases. Predecessors are used instead
// of successors for the inverse traversal.
template <> struct GraphTraits<Inverse<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getPredecessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getPredecessors().end();
  }
};
1861
1862 // The following set of template specializations implement GraphTraits to
1863 // treat VPRegionBlock as a graph and recurse inside its nodes. It's important
1864 // to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
1865 // (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
// there won't be automatic recursion into other VPBlockBases that turn out to
// be VPRegionBlocks.
1867 // VPRegionBlocks.
1868
/// GraphTraits specialization treating a VPRegionBlock as a graph of the
/// blocks it contains, entered via the region's entry block.
template <>
struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1886
/// GraphTraits specialization treating a const VPRegionBlock as a graph of the
/// blocks it contains, entered via the region's entry block.
template <>
struct GraphTraits<const VPRegionBlock *>
    : public GraphTraits<const VPBlockBase *> {
  using GraphRef = const VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1905
/// Inverse traversal of a VPRegionBlock's contents: the traversal is rooted at
/// the region's exit block and proceeds along predecessors.
template <>
struct GraphTraits<Inverse<VPRegionBlock *>>
    : public GraphTraits<Inverse<VPBlockBase *>> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(Inverse<GraphRef> N) {
    return N.Graph->getExit();
  }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getExit());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1926
1927 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
1928 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
1929 /// parent region's successors. This ensures all blocks in a region are visited
1930 /// before any blocks in a successor region when doing a reverse post-order
1931 // traversal of the graph.
1932 template <typename BlockPtrTy>
1933 class VPAllSuccessorsIterator
1934 : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
1935 std::forward_iterator_tag, VPBlockBase> {
1936 BlockPtrTy Block;
1937 /// Index of the current successor. For VPBasicBlock nodes, this simply is the
1938 /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
1939 /// used for the region's entry block, and SuccessorIdx - 1 are the indices
1940 /// for the successor array.
1941 size_t SuccessorIdx;
1942
1943 static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
1944 while (Current && Current->getNumSuccessors() == 0)
1945 Current = Current->getParent();
1946 return Current;
1947 }
1948
1949 /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
1950 /// both the const and non-const operator* implementations.
1951 template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
1952 if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
1953 if (SuccIdx == 0)
1954 return R->getEntry();
1955 SuccIdx--;
1956 }
1957
1958 // For exit blocks, use the next parent region with successors.
1959 return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
1960 }
1961
1962 public:
1963 VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
1964 : Block(Block), SuccessorIdx(Idx) {}
1965 VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
1966 : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
1967
1968 VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
1969 Block = R.Block;
1970 SuccessorIdx = R.SuccessorIdx;
1971 return *this;
1972 }
1973
1974 static VPAllSuccessorsIterator end(BlockPtrTy Block) {
1975 BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
1976 unsigned NumSuccessors = ParentWithSuccs
1977 ? ParentWithSuccs->getNumSuccessors()
1978 : Block->getNumSuccessors();
1979
1980 if (auto *R = dyn_cast<VPRegionBlock>(Block))
1981 return {R, NumSuccessors + 1};
1982 return {Block, NumSuccessors};
1983 }
1984
1985 bool operator==(const VPAllSuccessorsIterator &R) const {
1986 return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
1987 }
1988
1989 const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
1990
1991 BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
1992
1993 VPAllSuccessorsIterator &operator++() {
1994 SuccessorIdx++;
1995 return *this;
1996 }
1997
1998 VPAllSuccessorsIterator operator++(int X) {
1999 VPAllSuccessorsIterator Orig = *this;
2000 SuccessorIdx++;
2001 return Orig;
2002 }
2003 };
2004
/// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  /// Block at which the traversal starts.
  BlockTy EntryBlock;

public:
  VPBlockRecursiveTraversalWrapper(BlockTy EntryBlock)
      : EntryBlock(EntryBlock) {}
  BlockTy getEntry() { return EntryBlock; }
};
2013
/// GraphTraits specialization to recursively traverse VPBlockBase nodes,
/// including traversing through VPRegionBlocks. Exit blocks of a region
/// implicitly have their parent region's successors. This ensures all blocks in
/// a region are visited before any blocks in a successor region when doing a
/// reverse post-order traversal of the graph.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
    return N.getEntry();
  }

  /// Children (region entries and parent-region successors included) are
  /// produced by VPAllSuccessorsIterator.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2037
/// Const version of the recursive traversal GraphTraits for VPBlockBases,
/// including traversal through VPRegionBlocks.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
    return N.getEntry();
  }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2056
2057 /// VPlan models a candidate for vectorization, encoding various decisions take
2058 /// to produce efficient output IR, including which branches, basic-blocks and
2059 /// output IR instructions to generate, and their cost. VPlan holds a
2060 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
2061 /// VPBlock.
2062 class VPlan {
2063 friend class VPlanPrinter;
2064 friend class VPSlotTracker;
2065
2066 /// Hold the single entry to the Hierarchical CFG of the VPlan.
2067 VPBlockBase *Entry;
2068
2069 /// Holds the VFs applicable to this VPlan.
2070 SmallSetVector<ElementCount, 2> VFs;
2071
2072 /// Holds the name of the VPlan, for printing.
2073 std::string Name;
2074
2075 /// Holds all the external definitions created for this VPlan.
2076 // TODO: Introduce a specific representation for external definitions in
2077 // VPlan. External definitions must be immutable and hold a pointer to its
2078 // underlying IR that will be used to implement its structural comparison
2079 // (operators '==' and '<').
2080 SetVector<VPValue *> VPExternalDefs;
2081
2082 /// Represents the backedge taken count of the original loop, for folding
2083 /// the tail.
2084 VPValue *BackedgeTakenCount = nullptr;
2085
2086 /// Holds a mapping between Values and their corresponding VPValue inside
2087 /// VPlan.
2088 Value2VPValueTy Value2VPValue;
2089
2090 /// Contains all VPValues that been allocated by addVPValue directly and need
2091 /// to be free when the plan's destructor is called.
2092 SmallVector<VPValue *, 16> VPValuesToFree;
2093
2094 /// Holds the VPLoopInfo analysis for this VPlan.
2095 VPLoopInfo VPLInfo;
2096
2097 public:
2098 VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
2099 if (Entry)
2100 Entry->setPlan(this);
2101 }
2102
2103 ~VPlan() {
2104 if (Entry) {
2105 VPValue DummyValue;
2106 for (VPBlockBase *Block : depth_first(Entry))
2107 Block->dropAllReferences(&DummyValue);
2108
2109 VPBlockBase::deleteCFG(Entry);
2110 }
2111 for (VPValue *VPV : VPValuesToFree)
2112 delete VPV;
2113 if (BackedgeTakenCount)
2114 delete BackedgeTakenCount;
2115 for (VPValue *Def : VPExternalDefs)
2116 delete Def;
2117 }
2118
2119 /// Generate the IR code for this VPlan.
2120 void execute(struct VPTransformState *State);
2121
2122 VPBlockBase *getEntry() { return Entry; }
2123 const VPBlockBase *getEntry() const { return Entry; }
2124
2125 VPBlockBase *setEntry(VPBlockBase *Block) {
2126 Entry = Block;
2127 Block->setPlan(this);
2128 return Entry;
2129 }
2130
2131 /// The backedge taken count of the original loop.
2132 VPValue *getOrCreateBackedgeTakenCount() {
2133 if (!BackedgeTakenCount)
2134 BackedgeTakenCount = new VPValue();
2135 return BackedgeTakenCount;
2136 }
2137
2138 void addVF(ElementCount VF) { VFs.insert(VF); }
2139
2140 bool hasVF(ElementCount VF) { return VFs.count(VF); }
2141
2142 const std::string &getName() const { return Name; }
2143
2144 void setName(const Twine &newName) { Name = newName.str(); }
2145
2146 /// Add \p VPVal to the pool of external definitions if it's not already
2147 /// in the pool.
2148 void addExternalDef(VPValue *VPVal) { VPExternalDefs.insert(VPVal); }
2149
2150 void addVPValue(Value *V) {
2151 assert(V && "Trying to add a null Value to VPlan");
2152 assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2153 VPValue *VPV = new VPValue(V);
2154 Value2VPValue[V] = VPV;
2155 VPValuesToFree.push_back(VPV);
2156 }
2157
2158 void addVPValue(Value *V, VPValue *VPV) {
2159 assert(V && "Trying to add a null Value to VPlan");
2160 assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2161 Value2VPValue[V] = VPV;
2162 }
2163
2164 VPValue *getVPValue(Value *V) {
2165 assert(V && "Trying to get the VPValue of a null Value");
2166 assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2167 return Value2VPValue[V];
2168 }
2169
2170 VPValue *getOrAddVPValue(Value *V) {
2171 assert(V && "Trying to get or add the VPValue of a null Value");
2172 if (!Value2VPValue.count(V))
2173 addVPValue(V);
2174 return getVPValue(V);
2175 }
2176
2177 void removeVPValueFor(Value *V) { Value2VPValue.erase(V); }
2178
2179 /// Return the VPLoopInfo analysis for this VPlan.
2180 VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
2181 const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }
2182
2183 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2184 /// Print this VPlan to \p O.
2185 void print(raw_ostream &O) const;
2186
2187 /// Print this VPlan in DOT format to \p O.
2188 void printDOT(raw_ostream &O) const;
2189
2190 /// Dump the plan to stderr (for debugging).
2191 LLVM_DUMP_METHOD void dump() const;
2192 #endif
2193
2194 /// Returns a range mapping the values the range \p Operands to their
2195 /// corresponding VPValues.
2196 iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2197 mapToVPValues(User::op_range Operands) {
2198 std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2199 return getOrAddVPValue(Op);
2200 };
2201 return map_range(Operands, Fn);
2202 }
2203
2204 private:
2205 /// Add to the given dominator tree the header block and every new basic block
2206 /// that was created between it and the latch block, inclusive.
2207 static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2208 BasicBlock *LoopPreHeaderBB,
2209 BasicBlock *LoopExitBB);
2210 };
2211
2212 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// VPlanPrinter prints a given VPlan to a given output stream. The printing is
/// indented and follows the dot format.
class VPlanPrinter {
  raw_ostream &OS;
  const VPlan &Plan;
  /// Current indentation depth, in tab stops; updated by bumpIndent().
  unsigned Depth = 0;
  unsigned TabWidth = 2;
  /// Current indentation string, kept in sync with Depth by bumpIndent().
  std::string Indent;
  /// Next free block id handed out by getOrCreateBID().
  unsigned BID = 0;
  /// Ids already assigned to blocks.
  SmallDenseMap<const VPBlockBase *, unsigned> BlockID;

  VPSlotTracker SlotTracker;

  /// Handle indentation.
  void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }

  /// Print a given \p Block of the Plan.
  void dumpBlock(const VPBlockBase *Block);

  /// Print the information related to the CFG edges going out of a given
  /// \p Block, followed by printing the successor blocks themselves.
  void dumpEdges(const VPBlockBase *Block);

  /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
  /// its successor blocks.
  void dumpBasicBlock(const VPBasicBlock *BasicBlock);

  /// Print a given \p Region of the Plan.
  void dumpRegion(const VPRegionBlock *Region);

  /// Return the id assigned to \p Block, assigning the next free id if this is
  /// the first time \p Block is seen.
  unsigned getOrCreateBID(const VPBlockBase *Block) {
    return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
  }

  const Twine getOrCreateName(const VPBlockBase *Block);

  const Twine getUID(const VPBlockBase *Block);

  /// Print the information related to a CFG edge between two VPBlockBases.
  void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
                const Twine &Label);

public:
  VPlanPrinter(raw_ostream &O, const VPlan &P)
      : OS(O), Plan(P), SlotTracker(&P) {}

  LLVM_DUMP_METHOD void dump();
};
2261
/// Helper wrapping an IR Value so it can be printed with VPlan-style output.
struct VPlanIngredient {
  /// The wrapped IR value; not owned.
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  void print(raw_ostream &O) const;
};
2269
/// Print \p I to \p OS.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}
2274
/// Print \p Plan to \p OS.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2279 #endif
2280
2281 //===----------------------------------------------------------------------===//
2282 // VPlan Utilities
2283 //===----------------------------------------------------------------------===//
2284
/// Class that provides utilities for VPBlockBases in VPlan.
class VPBlockUtils {
public:
  VPBlockUtils() = delete;

  /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
  /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
  /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. If \p BlockPtr
  /// has more than one successor, its conditional bit is propagated to \p
  /// NewBlock. \p NewBlock must have neither successors nor predecessors.
  static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
    assert(NewBlock->getSuccessors().empty() &&
           "Can't insert new block with successors.");
    // TODO: move successors from BlockPtr to NewBlock when this functionality
    // is necessary. For now, setOneSuccessor will assert if BlockPtr already
    // has successors.
    BlockPtr->setOneSuccessor(NewBlock);
    NewBlock->setPredecessors({BlockPtr});
    NewBlock->setParent(BlockPtr->getParent());
  }

  /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
  /// BlockPtr. Add \p IfTrue and \p IfFalse as successors of \p BlockPtr and \p
  /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
  /// parent to \p IfTrue and \p IfFalse. \p Condition is set as the successor
  /// selector. \p BlockPtr must have no successors and \p IfTrue and \p IfFalse
  /// must have neither successors nor predecessors.
  static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
                                   VPValue *Condition, VPBlockBase *BlockPtr) {
    assert(IfTrue->getSuccessors().empty() &&
           "Can't insert IfTrue with successors.");
    assert(IfFalse->getSuccessors().empty() &&
           "Can't insert IfFalse with successors.");
    BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
    IfTrue->setPredecessors({BlockPtr});
    IfFalse->setPredecessors({BlockPtr});
    IfTrue->setParent(BlockPtr->getParent());
    IfFalse->setParent(BlockPtr->getParent());
  }

  /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
  /// the successors of \p From and \p From to the predecessors of \p To. Both
  /// VPBlockBases must have the same parent, which can be null. Both
  /// VPBlockBases can be already connected to other VPBlockBases.
  static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert((From->getParent() == To->getParent()) &&
           "Can't connect two block with different parents");
    assert(From->getNumSuccessors() < 2 &&
           "Blocks can't have more than two successors.");
    From->appendSuccessor(To);
    To->appendPredecessor(From);
  }

  /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
  /// from the successors of \p From and \p From from the predecessors of \p To.
  static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert(To && "Successor to disconnect is null.");
    From->removeSuccessor(To);
    To->removePredecessor(From);
  }

  /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge, i.e.
  /// a branch from a loop's latch to the header of the same loop.
  static bool isBackEdge(const VPBlockBase *FromBlock,
                         const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
    assert(FromBlock->getParent() == ToBlock->getParent() &&
           FromBlock->getParent() && "Must be in same region");
    const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
    const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
    // Blocks outside any loop, or in different loops, cannot form a back-edge.
    if (!FromLoop || !ToLoop || FromLoop != ToLoop)
      return false;

    // A back-edge is a branch from the loop latch to its header.
    return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
  }

  /// Returns true if \p Block is a loop latch.
  static bool blockIsLoopLatch(const VPBlockBase *Block,
                               const VPLoopInfo *VPLInfo) {
    if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
      return ParentVPL->isLoopLatch(Block);

    return false;
  }

  /// Count and return the number of successors of \p PredBlock excluding any
  /// backedges.
  static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
                                      VPLoopInfo *VPLI) {
    unsigned Count = 0;
    for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
      if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
        Count++;
    }
    return Count;
  }

  /// Return an iterator range over \p Range which only includes \p BlockTy
  /// blocks. The accesses are cast to \p BlockTy.
  template <typename BlockTy, typename T>
  static auto blocksOnly(const T &Range) {
    // Create BaseTy with correct const-ness based on BlockTy.
    using BaseTy =
        typename std::conditional<std::is_const<BlockTy>::value,
                                  const VPBlockBase, VPBlockBase>::type;

    // We need to first create an iterator range over (const) BlockTy & instead
    // of (const) BlockTy * for filter_range to work properly.
    auto Mapped =
        map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
    auto Filter = make_filter_range(
        Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
    return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
      return cast<BlockTy>(&Block);
    });
  }
};
2401
2402 class VPInterleavedAccessInfo {
2403 DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
2404 InterleaveGroupMap;
2405
2406 /// Type for mapping of instruction based interleave groups to VPInstruction
2407 /// interleave groups
2408 using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
2409 InterleaveGroup<VPInstruction> *>;
2410
2411 /// Recursively \p Region and populate VPlan based interleave groups based on
2412 /// \p IAI.
2413 void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
2414 InterleavedAccessInfo &IAI);
2415 /// Recursively traverse \p Block and populate VPlan based interleave groups
2416 /// based on \p IAI.
2417 void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
2418 InterleavedAccessInfo &IAI);
2419
2420 public:
2421 VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);
2422
2423 ~VPInterleavedAccessInfo() {
2424 SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
2425 // Avoid releasing a pointer twice.
2426 for (auto &I : InterleaveGroupMap)
2427 DelSet.insert(I.second);
2428 for (auto *Ptr : DelSet)
2429 delete Ptr;
2430 }
2431
2432 /// Get the interleave group that \p Instr belongs to.
2433 ///
2434 /// \returns nullptr if doesn't have such group.
2435 InterleaveGroup<VPInstruction> *
2436 getInterleaveGroup(VPInstruction *Instr) const {
2437 return InterleaveGroupMap.lookup(Instr);
2438 }
2439 };
2440
/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
class VPlanSlp {
  /// Classification of a candidate bundle: combining failed, the bundle is a
  /// set of loads, or a set of instructions sharing a common opcode.
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    // Sentinel key: a single pointer with all bits set, which cannot be a
    // valid VPValue address.
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    // Second sentinel key, distinct from the empty key.
    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    // Hash all pointers in the bundle; bundles are order-sensitive.
    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    // Element-wise equality via SmallVector::operator==.
    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  /// Interleaved-access analysis used to recognize consecutive loads/stores.
  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  /// A multi-node operand: the combined instruction plus the original operand
  /// bundle it was built from.
  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we have completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instruction can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
2535 } // end namespace llvm
2536
2537 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
2538