1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 /// VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 /// treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 /// within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
18 /// instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
22 //
23 //===----------------------------------------------------------------------===//
24
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27
28 #include "VPlanLoopInfo.h"
29 #include "VPlanValue.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/DepthFirstIterator.h"
32 #include "llvm/ADT/GraphTraits.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/ilist.h"
40 #include "llvm/ADT/ilist_node.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/Support/InstructionCost.h"
44 #include <algorithm>
45 #include <cassert>
46 #include <cstddef>
47 #include <map>
48 #include <string>
49
50 namespace llvm {
51
52 class BasicBlock;
53 class DominatorTree;
54 class InnerLoopVectorizer;
55 class LoopInfo;
56 class raw_ostream;
57 class RecurrenceDescriptor;
58 class Value;
59 class VPBasicBlock;
60 class VPRegionBlock;
61 class VPlan;
62 class VPlanSlp;
63
64 /// Returns a calculation for the total number of elements for a given \p VF.
65 /// For fixed width vectors this value is a constant, whereas for scalable
66 /// vectors it is an expression determined at runtime.
67 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF);
68
69 /// A range of powers-of-2 vectorization factors with fixed start and
70 /// adjustable end. The range includes start and excludes end, e.g.,:
71 /// [1, 9) = {1, 2, 4, 8}
72 struct VFRange {
73 // A power of 2.
74 const ElementCount Start;
75
76 // Need not be a power of 2. If End <= Start range is empty.
77 ElementCount End;
78
isEmptyVFRange79 bool isEmpty() const {
80 return End.getKnownMinValue() <= Start.getKnownMinValue();
81 }
82
VFRangeVFRange83 VFRange(const ElementCount &Start, const ElementCount &End)
84 : Start(Start), End(End) {
85 assert(Start.isScalable() == End.isScalable() &&
86 "Both Start and End should have the same scalable flag");
87 assert(isPowerOf2_32(Start.getKnownMinValue()) &&
88 "Expected Start to be a power of 2");
89 }
90 };
91
92 using VPlanPtr = std::unique_ptr<VPlan>;
93
94 /// In what follows, the term "input IR" refers to code that is fed into the
95 /// vectorizer whereas the term "output IR" refers to code that is generated by
96 /// the vectorizer.
97
98 /// VPLane provides a way to access lanes in both fixed width and scalable
99 /// vectors, where for the latter the lane index sometimes needs calculating
100 /// as a runtime expression.
101 class VPLane {
102 public:
103 /// Kind describes how to interpret Lane.
104 enum class Kind : uint8_t {
105 /// For First, Lane is the index into the first N elements of a
106 /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
107 First,
108 /// For ScalableLast, Lane is the offset from the start of the last
109 /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
110 /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
111 /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
112 ScalableLast
113 };
114
115 private:
116 /// in [0..VF)
117 unsigned Lane;
118
119 /// Indicates how the Lane should be interpreted, as described above.
120 Kind LaneKind;
121
122 public:
VPLane(unsigned Lane,Kind LaneKind)123 VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
124
getFirstLane()125 static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
126
getLastLaneForVF(const ElementCount & VF)127 static VPLane getLastLaneForVF(const ElementCount &VF) {
128 unsigned LaneOffset = VF.getKnownMinValue() - 1;
129 Kind LaneKind;
130 if (VF.isScalable())
131 // In this case 'LaneOffset' refers to the offset from the start of the
132 // last subvector with VF.getKnownMinValue() elements.
133 LaneKind = VPLane::Kind::ScalableLast;
134 else
135 LaneKind = VPLane::Kind::First;
136 return VPLane(LaneOffset, LaneKind);
137 }
138
139 /// Returns a compile-time known value for the lane index and asserts if the
140 /// lane can only be calculated at runtime.
getKnownLane()141 unsigned getKnownLane() const {
142 assert(LaneKind == Kind::First);
143 return Lane;
144 }
145
146 /// Returns an expression describing the lane index that can be used at
147 /// runtime.
148 Value *getAsRuntimeExpr(IRBuilder<> &Builder, const ElementCount &VF) const;
149
150 /// Returns the Kind of lane offset.
getKind()151 Kind getKind() const { return LaneKind; }
152
153 /// Returns true if this is the first lane of the whole vector.
isFirstLane()154 bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
155
156 /// Maps the lane to a cache index based on \p VF.
mapToCacheIndex(const ElementCount & VF)157 unsigned mapToCacheIndex(const ElementCount &VF) const {
158 switch (LaneKind) {
159 case VPLane::Kind::ScalableLast:
160 assert(VF.isScalable() && Lane < VF.getKnownMinValue());
161 return VF.getKnownMinValue() + Lane;
162 default:
163 assert(Lane < VF.getKnownMinValue());
164 return Lane;
165 }
166 }
167
168 /// Returns the maxmimum number of lanes that we are able to consider
169 /// caching for \p VF.
getNumCachedLanes(const ElementCount & VF)170 static unsigned getNumCachedLanes(const ElementCount &VF) {
171 return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
172 }
173 };
174
175 /// VPIteration represents a single point in the iteration space of the output
176 /// (vectorized and/or unrolled) IR loop.
177 struct VPIteration {
178 /// in [0..UF)
179 unsigned Part;
180
181 VPLane Lane;
182
183 VPIteration(unsigned Part, unsigned Lane,
184 VPLane::Kind Kind = VPLane::Kind::First)
PartVPIteration185 : Part(Part), Lane(Lane, Kind) {}
186
VPIterationVPIteration187 VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
188
isFirstIterationVPIteration189 bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
190 };
191
192 /// VPTransformState holds information passed down when "executing" a VPlan,
193 /// needed for generating the output IR.
194 struct VPTransformState {
VPTransformStateVPTransformState195 VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
196 DominatorTree *DT, IRBuilder<> &Builder,
197 InnerLoopVectorizer *ILV, VPlan *Plan)
198 : VF(VF), UF(UF), Instance(), LI(LI), DT(DT), Builder(Builder), ILV(ILV),
199 Plan(Plan) {}
200
201 /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
202 ElementCount VF;
203 unsigned UF;
204
205 /// Hold the indices to generate specific scalar instructions. Null indicates
206 /// that all instances are to be generated, using either scalar or vector
207 /// instructions.
208 Optional<VPIteration> Instance;
209
210 struct DataState {
211 /// A type for vectorized values in the new loop. Each value from the
212 /// original loop, when vectorized, is represented by UF vector values in
213 /// the new unrolled loop, where UF is the unroll factor.
214 typedef SmallVector<Value *, 2> PerPartValuesTy;
215
216 DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
217
218 using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
219 DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
220 } Data;
221
222 /// Get the generated Value for a given VPValue and a given Part. Note that
223 /// as some Defs are still created by ILV and managed in its ValueMap, this
224 /// method will delegate the call to ILV in such cases in order to provide
225 /// callers a consistent API.
226 /// \see set.
227 Value *get(VPValue *Def, unsigned Part);
228
229 /// Get the generated Value for a given VPValue and given Part and Lane.
230 Value *get(VPValue *Def, const VPIteration &Instance);
231
hasVectorValueVPTransformState232 bool hasVectorValue(VPValue *Def, unsigned Part) {
233 auto I = Data.PerPartOutput.find(Def);
234 return I != Data.PerPartOutput.end() && Part < I->second.size() &&
235 I->second[Part];
236 }
237
hasAnyVectorValueVPTransformState238 bool hasAnyVectorValue(VPValue *Def) const {
239 return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
240 }
241
hasScalarValueVPTransformState242 bool hasScalarValue(VPValue *Def, VPIteration Instance) {
243 auto I = Data.PerPartScalars.find(Def);
244 if (I == Data.PerPartScalars.end())
245 return false;
246 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
247 return Instance.Part < I->second.size() &&
248 CacheIdx < I->second[Instance.Part].size() &&
249 I->second[Instance.Part][CacheIdx];
250 }
251
252 /// Set the generated Value for a given VPValue and a given Part.
setVPTransformState253 void set(VPValue *Def, Value *V, unsigned Part) {
254 if (!Data.PerPartOutput.count(Def)) {
255 DataState::PerPartValuesTy Entry(UF);
256 Data.PerPartOutput[Def] = Entry;
257 }
258 Data.PerPartOutput[Def][Part] = V;
259 }
260 /// Reset an existing vector value for \p Def and a given \p Part.
resetVPTransformState261 void reset(VPValue *Def, Value *V, unsigned Part) {
262 auto Iter = Data.PerPartOutput.find(Def);
263 assert(Iter != Data.PerPartOutput.end() &&
264 "need to overwrite existing value");
265 Iter->second[Part] = V;
266 }
267
268 /// Set the generated scalar \p V for \p Def and the given \p Instance.
setVPTransformState269 void set(VPValue *Def, Value *V, const VPIteration &Instance) {
270 auto Iter = Data.PerPartScalars.insert({Def, {}});
271 auto &PerPartVec = Iter.first->second;
272 while (PerPartVec.size() <= Instance.Part)
273 PerPartVec.emplace_back();
274 auto &Scalars = PerPartVec[Instance.Part];
275 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
276 while (Scalars.size() <= CacheIdx)
277 Scalars.push_back(nullptr);
278 assert(!Scalars[CacheIdx] && "should overwrite existing value");
279 Scalars[CacheIdx] = V;
280 }
281
282 /// Reset an existing scalar value for \p Def and a given \p Instance.
resetVPTransformState283 void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
284 auto Iter = Data.PerPartScalars.find(Def);
285 assert(Iter != Data.PerPartScalars.end() &&
286 "need to overwrite existing value");
287 assert(Instance.Part < Iter->second.size() &&
288 "need to overwrite existing value");
289 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
290 assert(CacheIdx < Iter->second[Instance.Part].size() &&
291 "need to overwrite existing value");
292 Iter->second[Instance.Part][CacheIdx] = V;
293 }
294
295 /// Hold state information used when constructing the CFG of the output IR,
296 /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
297 struct CFGState {
298 /// The previous VPBasicBlock visited. Initially set to null.
299 VPBasicBlock *PrevVPBB = nullptr;
300
301 /// The previous IR BasicBlock created or used. Initially set to the new
302 /// header BasicBlock.
303 BasicBlock *PrevBB = nullptr;
304
305 /// The last IR BasicBlock in the output IR. Set to the new latch
306 /// BasicBlock, used for placing the newly created BasicBlocks.
307 BasicBlock *LastBB = nullptr;
308
309 /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
310 /// of replication, maps the BasicBlock of the last replica created.
311 SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
312
313 /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
314 /// up at the end of vector code generation.
315 SmallVector<VPBasicBlock *, 8> VPBBsToFix;
316
317 CFGState() = default;
318 } CFG;
319
320 /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
321 LoopInfo *LI;
322
323 /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
324 DominatorTree *DT;
325
326 /// Hold a reference to the IRBuilder used to generate output IR code.
327 IRBuilder<> &Builder;
328
329 VPValue2ValueTy VPValue2Value;
330
331 /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
332 Value *CanonicalIV = nullptr;
333
334 /// Hold the trip count of the scalar loop.
335 Value *TripCount = nullptr;
336
337 /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
338 InnerLoopVectorizer *ILV;
339
340 /// Pointer to the VPlan code is generated for.
341 VPlan *Plan;
342 };
343
344 /// VPUsers instance used by VPBlockBase to manage CondBit and the block
345 /// predicate. Currently VPBlockUsers are used in VPBlockBase for historical
346 /// reasons, but in the future the only VPUsers should either be recipes or
347 /// live-outs.VPBlockBase uses.
348 struct VPBlockUser : public VPUser {
VPBlockUserVPBlockUser349 VPBlockUser() : VPUser({}, VPUserID::Block) {}
350
getSingleOperandOrNullVPBlockUser351 VPValue *getSingleOperandOrNull() {
352 if (getNumOperands() == 1)
353 return getOperand(0);
354
355 return nullptr;
356 }
getSingleOperandOrNullVPBlockUser357 const VPValue *getSingleOperandOrNull() const {
358 if (getNumOperands() == 1)
359 return getOperand(0);
360
361 return nullptr;
362 }
363
resetSingleOpUserVPBlockUser364 void resetSingleOpUser(VPValue *NewVal) {
365 assert(getNumOperands() <= 1 && "Didn't expect more than one operand!");
366 if (!NewVal) {
367 if (getNumOperands() == 1)
368 removeLastOperand();
369 return;
370 }
371
372 if (getNumOperands() == 1)
373 setOperand(0, NewVal);
374 else
375 addOperand(NewVal);
376 }
377 };
378
379 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
380 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
381 class VPBlockBase {
382 friend class VPBlockUtils;
383
384 const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
385
386 /// An optional name for the block.
387 std::string Name;
388
389 /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
390 /// it is a topmost VPBlockBase.
391 VPRegionBlock *Parent = nullptr;
392
393 /// List of predecessor blocks.
394 SmallVector<VPBlockBase *, 1> Predecessors;
395
396 /// List of successor blocks.
397 SmallVector<VPBlockBase *, 1> Successors;
398
399 /// Successor selector managed by a VPUser. For blocks with zero or one
400 /// successors, there is no operand. Otherwise there is exactly one operand
401 /// which is the branch condition.
402 VPBlockUser CondBitUser;
403
404 /// If the block is predicated, its predicate is stored as an operand of this
405 /// VPUser to maintain the def-use relations. Otherwise there is no operand
406 /// here.
407 VPBlockUser PredicateUser;
408
409 /// VPlan containing the block. Can only be set on the entry block of the
410 /// plan.
411 VPlan *Plan = nullptr;
412
413 /// Add \p Successor as the last successor to this block.
appendSuccessor(VPBlockBase * Successor)414 void appendSuccessor(VPBlockBase *Successor) {
415 assert(Successor && "Cannot add nullptr successor!");
416 Successors.push_back(Successor);
417 }
418
419 /// Add \p Predecessor as the last predecessor to this block.
appendPredecessor(VPBlockBase * Predecessor)420 void appendPredecessor(VPBlockBase *Predecessor) {
421 assert(Predecessor && "Cannot add nullptr predecessor!");
422 Predecessors.push_back(Predecessor);
423 }
424
425 /// Remove \p Predecessor from the predecessors of this block.
removePredecessor(VPBlockBase * Predecessor)426 void removePredecessor(VPBlockBase *Predecessor) {
427 auto Pos = find(Predecessors, Predecessor);
428 assert(Pos && "Predecessor does not exist");
429 Predecessors.erase(Pos);
430 }
431
432 /// Remove \p Successor from the successors of this block.
removeSuccessor(VPBlockBase * Successor)433 void removeSuccessor(VPBlockBase *Successor) {
434 auto Pos = find(Successors, Successor);
435 assert(Pos && "Successor does not exist");
436 Successors.erase(Pos);
437 }
438
439 protected:
VPBlockBase(const unsigned char SC,const std::string & N)440 VPBlockBase(const unsigned char SC, const std::string &N)
441 : SubclassID(SC), Name(N) {}
442
443 public:
444 /// An enumeration for keeping track of the concrete subclass of VPBlockBase
445 /// that are actually instantiated. Values of this enumeration are kept in the
446 /// SubclassID field of the VPBlockBase objects. They are used for concrete
447 /// type identification.
448 using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
449
450 using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
451
452 virtual ~VPBlockBase() = default;
453
getName()454 const std::string &getName() const { return Name; }
455
setName(const Twine & newName)456 void setName(const Twine &newName) { Name = newName.str(); }
457
458 /// \return an ID for the concrete type of this object.
459 /// This is used to implement the classof checks. This should not be used
460 /// for any other purpose, as the values may change as LLVM evolves.
getVPBlockID()461 unsigned getVPBlockID() const { return SubclassID; }
462
getParent()463 VPRegionBlock *getParent() { return Parent; }
getParent()464 const VPRegionBlock *getParent() const { return Parent; }
465
466 /// \return A pointer to the plan containing the current block.
467 VPlan *getPlan();
468 const VPlan *getPlan() const;
469
470 /// Sets the pointer of the plan containing the block. The block must be the
471 /// entry block into the VPlan.
472 void setPlan(VPlan *ParentPlan);
473
setParent(VPRegionBlock * P)474 void setParent(VPRegionBlock *P) { Parent = P; }
475
476 /// \return the VPBasicBlock that is the entry of this VPBlockBase,
477 /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
478 /// VPBlockBase is a VPBasicBlock, it is returned.
479 const VPBasicBlock *getEntryBasicBlock() const;
480 VPBasicBlock *getEntryBasicBlock();
481
482 /// \return the VPBasicBlock that is the exit of this VPBlockBase,
483 /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
484 /// VPBlockBase is a VPBasicBlock, it is returned.
485 const VPBasicBlock *getExitBasicBlock() const;
486 VPBasicBlock *getExitBasicBlock();
487
getSuccessors()488 const VPBlocksTy &getSuccessors() const { return Successors; }
getSuccessors()489 VPBlocksTy &getSuccessors() { return Successors; }
490
getPredecessors()491 const VPBlocksTy &getPredecessors() const { return Predecessors; }
getPredecessors()492 VPBlocksTy &getPredecessors() { return Predecessors; }
493
494 /// \return the successor of this VPBlockBase if it has a single successor.
495 /// Otherwise return a null pointer.
getSingleSuccessor()496 VPBlockBase *getSingleSuccessor() const {
497 return (Successors.size() == 1 ? *Successors.begin() : nullptr);
498 }
499
500 /// \return the predecessor of this VPBlockBase if it has a single
501 /// predecessor. Otherwise return a null pointer.
getSinglePredecessor()502 VPBlockBase *getSinglePredecessor() const {
503 return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
504 }
505
getNumSuccessors()506 size_t getNumSuccessors() const { return Successors.size(); }
getNumPredecessors()507 size_t getNumPredecessors() const { return Predecessors.size(); }
508
509 /// An Enclosing Block of a block B is any block containing B, including B
510 /// itself. \return the closest enclosing block starting from "this", which
511 /// has successors. \return the root enclosing block if all enclosing blocks
512 /// have no successors.
513 VPBlockBase *getEnclosingBlockWithSuccessors();
514
515 /// \return the closest enclosing block starting from "this", which has
516 /// predecessors. \return the root enclosing block if all enclosing blocks
517 /// have no predecessors.
518 VPBlockBase *getEnclosingBlockWithPredecessors();
519
520 /// \return the successors either attached directly to this VPBlockBase or, if
521 /// this VPBlockBase is the exit block of a VPRegionBlock and has no
522 /// successors of its own, search recursively for the first enclosing
523 /// VPRegionBlock that has successors and return them. If no such
524 /// VPRegionBlock exists, return the (empty) successors of the topmost
525 /// VPBlockBase reached.
getHierarchicalSuccessors()526 const VPBlocksTy &getHierarchicalSuccessors() {
527 return getEnclosingBlockWithSuccessors()->getSuccessors();
528 }
529
530 /// \return the hierarchical successor of this VPBlockBase if it has a single
531 /// hierarchical successor. Otherwise return a null pointer.
getSingleHierarchicalSuccessor()532 VPBlockBase *getSingleHierarchicalSuccessor() {
533 return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
534 }
535
536 /// \return the predecessors either attached directly to this VPBlockBase or,
537 /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
538 /// predecessors of its own, search recursively for the first enclosing
539 /// VPRegionBlock that has predecessors and return them. If no such
540 /// VPRegionBlock exists, return the (empty) predecessors of the topmost
541 /// VPBlockBase reached.
getHierarchicalPredecessors()542 const VPBlocksTy &getHierarchicalPredecessors() {
543 return getEnclosingBlockWithPredecessors()->getPredecessors();
544 }
545
546 /// \return the hierarchical predecessor of this VPBlockBase if it has a
547 /// single hierarchical predecessor. Otherwise return a null pointer.
getSingleHierarchicalPredecessor()548 VPBlockBase *getSingleHierarchicalPredecessor() {
549 return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
550 }
551
552 /// \return the condition bit selecting the successor.
553 VPValue *getCondBit();
554 /// \return the condition bit selecting the successor.
555 const VPValue *getCondBit() const;
556 /// Set the condition bit selecting the successor.
557 void setCondBit(VPValue *CV);
558
559 /// \return the block's predicate.
560 VPValue *getPredicate();
561 /// \return the block's predicate.
562 const VPValue *getPredicate() const;
563 /// Set the block's predicate.
564 void setPredicate(VPValue *Pred);
565
566 /// Set a given VPBlockBase \p Successor as the single successor of this
567 /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
568 /// This VPBlockBase must have no successors.
setOneSuccessor(VPBlockBase * Successor)569 void setOneSuccessor(VPBlockBase *Successor) {
570 assert(Successors.empty() && "Setting one successor when others exist.");
571 appendSuccessor(Successor);
572 }
573
574 /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
575 /// successors of this VPBlockBase. \p Condition is set as the successor
576 /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
577 /// IfFalse. This VPBlockBase must have no successors.
setTwoSuccessors(VPBlockBase * IfTrue,VPBlockBase * IfFalse,VPValue * Condition)578 void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
579 VPValue *Condition) {
580 assert(Successors.empty() && "Setting two successors when others exist.");
581 assert(Condition && "Setting two successors without condition!");
582 setCondBit(Condition);
583 appendSuccessor(IfTrue);
584 appendSuccessor(IfFalse);
585 }
586
587 /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
588 /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
589 /// as successor of any VPBasicBlock in \p NewPreds.
setPredecessors(ArrayRef<VPBlockBase * > NewPreds)590 void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
591 assert(Predecessors.empty() && "Block predecessors already set.");
592 for (auto *Pred : NewPreds)
593 appendPredecessor(Pred);
594 }
595
596 /// Remove all the predecessor of this block.
clearPredecessors()597 void clearPredecessors() { Predecessors.clear(); }
598
599 /// Remove all the successors of this block and set to null its condition bit
clearSuccessors()600 void clearSuccessors() {
601 Successors.clear();
602 setCondBit(nullptr);
603 }
604
605 /// The method which generates the output IR that correspond to this
606 /// VPBlockBase, thereby "executing" the VPlan.
607 virtual void execute(struct VPTransformState *State) = 0;
608
609 /// Delete all blocks reachable from a given VPBlockBase, inclusive.
610 static void deleteCFG(VPBlockBase *Entry);
611
612 /// Return true if it is legal to hoist instructions into this block.
isLegalToHoistInto()613 bool isLegalToHoistInto() {
614 // There are currently no constraints that prevent an instruction to be
615 // hoisted into a VPBlockBase.
616 return true;
617 }
618
619 /// Replace all operands of VPUsers in the block with \p NewValue and also
620 /// replaces all uses of VPValues defined in the block with NewValue.
621 virtual void dropAllReferences(VPValue *NewValue) = 0;
622
623 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
printAsOperand(raw_ostream & OS,bool PrintType)624 void printAsOperand(raw_ostream &OS, bool PrintType) const {
625 OS << getName();
626 }
627
628 /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
629 /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
630 /// consequtive numbers.
631 ///
632 /// Note that the numbering is applied to the whole VPlan, so printing
633 /// individual blocks is consistent with the whole VPlan printing.
634 virtual void print(raw_ostream &O, const Twine &Indent,
635 VPSlotTracker &SlotTracker) const = 0;
636
637 /// Print plain-text dump of this VPlan to \p O.
print(raw_ostream & O)638 void print(raw_ostream &O) const {
639 VPSlotTracker SlotTracker(getPlan());
640 print(O, "", SlotTracker);
641 }
642
643 /// Dump this VPBlockBase to dbgs().
dump()644 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
645 #endif
646 };
647
648 /// VPRecipeBase is a base class modeling a sequence of one or more output IR
649 /// instructions. VPRecipeBase owns the the VPValues it defines through VPDef
650 /// and is responsible for deleting its defined values. Single-value
651 /// VPRecipeBases that also inherit from VPValue must make sure to inherit from
652 /// VPRecipeBase before VPValue.
653 class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
654 public VPDef,
655 public VPUser {
656 friend VPBasicBlock;
657 friend class VPBlockUtils;
658
659 /// Each VPRecipe belongs to a single VPBasicBlock.
660 VPBasicBlock *Parent = nullptr;
661
662 public:
VPRecipeBase(const unsigned char SC,ArrayRef<VPValue * > Operands)663 VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
664 : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
665
666 template <typename IterT>
VPRecipeBase(const unsigned char SC,iterator_range<IterT> Operands)667 VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
668 : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
669 virtual ~VPRecipeBase() = default;
670
671 /// \return the VPBasicBlock which this VPRecipe belongs to.
getParent()672 VPBasicBlock *getParent() { return Parent; }
getParent()673 const VPBasicBlock *getParent() const { return Parent; }
674
675 /// The method which generates the output IR instructions that correspond to
676 /// this VPRecipe, thereby "executing" the VPlan.
677 virtual void execute(struct VPTransformState &State) = 0;
678
679 /// Insert an unlinked recipe into a basic block immediately before
680 /// the specified recipe.
681 void insertBefore(VPRecipeBase *InsertPos);
682
683 /// Insert an unlinked Recipe into a basic block immediately after
684 /// the specified Recipe.
685 void insertAfter(VPRecipeBase *InsertPos);
686
687 /// Unlink this recipe from its current VPBasicBlock and insert it into
688 /// the VPBasicBlock that MovePos lives in, right after MovePos.
689 void moveAfter(VPRecipeBase *MovePos);
690
691 /// Unlink this recipe and insert into BB before I.
692 ///
693 /// \pre I is a valid iterator into BB.
694 void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);
695
696 /// This method unlinks 'this' from the containing basic block, but does not
697 /// delete it.
698 void removeFromParent();
699
700 /// This method unlinks 'this' from the containing basic block and deletes it.
701 ///
702 /// \returns an iterator pointing to the element after the erased one
703 iplist<VPRecipeBase>::iterator eraseFromParent();
704
705 /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
706 /// otherwise.
getUnderlyingInstr()707 Instruction *getUnderlyingInstr() {
708 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
709 }
getUnderlyingInstr()710 const Instruction *getUnderlyingInstr() const {
711 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
712 }
713
714 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)715 static inline bool classof(const VPDef *D) {
716 // All VPDefs are also VPRecipeBases.
717 return true;
718 }
719
classof(const VPUser * U)720 static inline bool classof(const VPUser *U) {
721 return U->getVPUserID() == VPUser::VPUserID::Recipe;
722 }
723
724 /// Returns true if the recipe may have side-effects.
725 bool mayHaveSideEffects() const;
726
727 /// Returns true for PHI-like recipes.
isPhi()728 bool isPhi() const {
729 return getVPDefID() == VPWidenIntOrFpInductionSC || getVPDefID() == VPWidenPHISC ||
730 getVPDefID() == VPPredInstPHISC || getVPDefID() == VPWidenCanonicalIVSC;
731 }
732 };
733
classof(const VPDef * Def)734 inline bool VPUser::classof(const VPDef *Def) {
735 return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
736 Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
737 Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
738 Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
739 Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
740 Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
741 Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
742 Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
743 Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
744 Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
745 Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
746 }
747
/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While as any Recipe it may generate a sequence of IR instructions when
/// executed, these instructions would always form a single-def expression as
/// the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeBase, public VPValue {
  friend class VPlanSlp;

public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions. Values
  /// start past the last LLVM IR opcode so the two ranges never overlap.
  enum {
    Not = Instruction::OtherOpsEnd + 1,
    ICmpULE,
    SLPLoad,
    SLPStore,
    ActiveLaneMask,
  };

private:
  typedef unsigned char OpcodeTy;
  // Either an LLVM IR opcode or one of the VPlan-specific opcodes above.
  OpcodeTy Opcode;

  /// Utility method serving execute(): generates a single instance of the
  /// modeled instruction for unroll part \p Part.
  void generateInstruction(VPTransformState &State, unsigned Part);

protected:
  /// Record \p I as the underlying IR instruction of this VPInstruction.
  void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }

public:
  /// Construct a VPInstruction with the given \p Opcode and \p Operands; no
  /// underlying IR instruction is attached.
  VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {}

  /// Construct from VPInstruction operands; each operand contributes its
  /// single defined VPValue as an operand of this VPInstruction.
  VPInstruction(unsigned Opcode, ArrayRef<VPInstruction *> Operands)
      : VPRecipeBase(VPRecipeBase::VPInstructionSC, {}),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {
    for (auto *I : Operands)
      addOperand(I->getVPSingleValue());
  }

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands)
      : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands)) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVInstructionSC;
  }

  /// Create a new VPInstruction with the same opcode and operands as this one.
  /// The caller owns the returned instruction.
  VPInstruction *clone() const {
    SmallVector<VPValue *, 2> Operands(operands());
    return new VPInstruction(Opcode, Operands);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  /// Returns the opcode modeled by this VPInstruction (an LLVM IR opcode or
  /// one of the VPlan-specific opcodes).
  unsigned getOpcode() const { return Opcode; }

  /// Generate the instruction.
  /// TODO: We currently execute only per-part unless a specific instance is
  /// provided.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the VPInstruction to \p O.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Print the VPInstruction to dbgs() (for debugging).
  LLVM_DUMP_METHOD void dump() const;
#endif

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const {
    // TODO: we can use attributes of the called function to rule out memory
    // modifications.
    return Opcode == Instruction::Store || Opcode == Instruction::Call ||
           Opcode == Instruction::Invoke || Opcode == SLPStore;
  }

  /// Returns true if this VPInstruction produces a value.
  bool hasResult() const {
    // CallInst may or may not have a result, depending on the called function.
    // Conservatively assume calls have results for now.
    switch (getOpcode()) {
    case Instruction::Ret:
    case Instruction::Br:
    case Instruction::Store:
    case Instruction::Switch:
    case Instruction::IndirectBr:
    case Instruction::Resume:
    case Instruction::CatchRet:
    case Instruction::Unreachable:
    case Instruction::Fence:
    case Instruction::AtomicRMW:
      return false;
    default:
      return true;
    }
  }
};
850
/// VPWidenRecipe is a recipe for producing a copy of vector type its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue {
public:
  /// Construct a recipe that widens ingredient \p I, consuming \p Operands.
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
        VPValue(VPValue::VPVWidenSC, &I, this) {}

  ~VPWidenRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSC;
  }
  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenSC;
  }

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
880
/// A recipe for widening Call instructions.
class VPWidenCallRecipe : public VPRecipeBase, public VPValue {

public:
  /// Construct a recipe widening call \p I; \p CallArguments are the VPValues
  /// supplying the call's arguments.
  template <typename IterT>
  VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
      : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
        VPValue(VPValue::VPVWidenCallSC, &I, this) {}

  ~VPWidenCallRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
  }

  /// Produce a widened version of the call instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
906
/// A recipe for widening select instructions.
class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {

  /// Is the condition of the select loop invariant?
  bool InvariantCond;

public:
  /// Construct a recipe widening select \p I with the given \p Operands.
  /// \p InvariantCond indicates the select's condition is loop invariant, in
  /// which case it need not be vectorized.
  template <typename IterT>
  VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
                      bool InvariantCond)
      : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
        VPValue(VPValue::VPVWidenSelectSC, &I, this),
        InvariantCond(InvariantCond) {}

  ~VPWidenSelectRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
  }

  /// Produce a widened version of the select instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
937
938 /// A recipe for handling GEP instructions.
939 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
940 bool IsPtrLoopInvariant;
941 SmallBitVector IsIndexLoopInvariant;
942
943 public:
944 template <typename IterT>
VPWidenGEPRecipe(GetElementPtrInst * GEP,iterator_range<IterT> Operands)945 VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
946 : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
947 VPValue(VPWidenGEPSC, GEP, this),
948 IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
949
950 template <typename IterT>
VPWidenGEPRecipe(GetElementPtrInst * GEP,iterator_range<IterT> Operands,Loop * OrigLoop)951 VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
952 Loop *OrigLoop)
953 : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
954 VPValue(VPValue::VPVWidenGEPSC, GEP, this),
955 IsIndexLoopInvariant(GEP->getNumIndices(), false) {
956 IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
957 for (auto Index : enumerate(GEP->indices()))
958 IsIndexLoopInvariant[Index.index()] =
959 OrigLoop->isLoopInvariant(Index.value().get());
960 }
961 ~VPWidenGEPRecipe() override = default;
962
963 /// Method to support type inquiry through isa, cast, and dyn_cast.
classof(const VPDef * D)964 static inline bool classof(const VPDef *D) {
965 return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
966 }
967
968 /// Generate the gep nodes.
969 void execute(VPTransformState &State) override;
970
971 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
972 /// Print the recipe.
973 void print(raw_ostream &O, const Twine &Indent,
974 VPSlotTracker &SlotTracker) const override;
975 #endif
976 };
977
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector and scalar values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase {
  /// The induction phi in the original scalar loop.
  PHINode *IV;

public:
  /// Construct a recipe for induction phi \p IV starting at \p Start. The
  /// first defined VPValue corresponds to \p Trunc if provided, otherwise to
  /// \p IV itself; a second VPValue is defined for \p Cast if provided.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, Instruction *Cast,
                                TruncInst *Trunc = nullptr)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), IV(IV) {
    if (Trunc)
      new VPValue(Trunc, this);
    else
      new VPValue(IV, this);

    if (Cast)
      new VPValue(Cast, this);
  }
  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }

  /// Returns the cast VPValue, if one is attached, or nullptr otherwise.
  /// The cast, when present, is always the second defined value.
  VPValue *getCastValue() {
    if (getNumDefinedValues() != 2)
      return nullptr;
    return getVPValue(1);
  }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
};
1031
/// A recipe for handling all phi nodes except for integer and FP inductions.
/// For reduction PHIs, RdxDesc must point to the corresponding recurrence
/// descriptor, the start value is the first operand of the recipe and the
/// incoming value from the backedge is the second operand. In the VPlan native
/// path, all incoming VPValues & VPBasicBlock pairs are managed in the recipe
/// directly.
class VPWidenPHIRecipe : public VPRecipeBase, public VPValue {
  /// Descriptor for a reduction PHI; nullptr for non-reduction phis.
  RecurrenceDescriptor *RdxDesc = nullptr;

  /// List of incoming blocks. Only used in the VPlan native path.
  SmallVector<VPBasicBlock *, 2> IncomingBlocks;

public:
  /// Create a new VPWidenPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPWidenPHIRecipe(PHINode *Phi, RecurrenceDescriptor &RdxDesc, VPValue &Start)
      : VPWidenPHIRecipe(Phi) {
    this->RdxDesc = &RdxDesc;
    addOperand(&Start);
  }

  /// Create a VPWidenPHIRecipe for \p Phi
  VPWidenPHIRecipe(PHINode *Phi)
      : VPRecipeBase(VPWidenPHISC, {}),
        VPValue(VPValue::VPVWidenPHISC, Phi, this) {}
  ~VPWidenPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the phi, if it is a reduction, or nullptr
  /// when no operands have been added.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge, if it is a reduction.
  VPValue *getBackedgeValue() {
    assert(RdxDesc && "second incoming value is only guaranteed to be backedge "
                      "value for reductions");
    return getOperand(1);
  }

  /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
  void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
    addOperand(IncomingV);
    IncomingBlocks.push_back(IncomingBlock);
  }

  /// Returns the \p I th incoming VPValue.
  VPValue *getIncomingValue(unsigned I) { return getOperand(I); }

  /// Returns the \p I th incoming VPBasicBlock.
  VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }

  /// Returns the recurrence descriptor, or nullptr for non-reduction phis.
  RecurrenceDescriptor *getRecurrenceDescriptor() { return RdxDesc; }
};
1103
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
  /// The phi node in the original scalar loop being blended.
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC, Operands),
        VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx (values sit at even operand slots).
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx (masks sit at odd operand slots).
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1146
/// VPInterleaveRecipe is a recipe for transforming an interleave group of load
/// or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase {
  /// The interleave group being widened.
  const InterleaveGroup<Instruction> *IG;

  /// Whether an optional mask was added as the last operand.
  bool HasMask = false;

public:
  /// Construct a recipe for \p IG accessing \p Addr. A VPValue is defined for
  /// every group member that produces a value (i.e. is not void-typed).
  /// \p Mask may be null, meaning an all-one mask.
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        // Void-typed members (stores) define no value.
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last, currently 2nd operand.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumOperands() - (HasMask ? 2 : 1));
  }

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Return the interleave group this recipe widens.
  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }
};
1213
/// A recipe to represent inloop reduction operations, performing a reduction on
/// a vector operand into a scalar value, and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPValue {
  /// The recurrence descriptor for the reduction in question.
  RecurrenceDescriptor *RdxDesc;
  /// Pointer to the TTI, needed to create the target reduction
  const TargetTransformInfo *TTI;

public:
  /// Construct a reduction recipe for \p I described by \p R, accumulating
  /// \p VecOp into \p ChainOp. \p CondOp may be null; when provided it is
  /// added as the optional third operand.
  VPReductionRecipe(RecurrenceDescriptor *R, Instruction *I, VPValue *ChainOp,
                    VPValue *VecOp, VPValue *CondOp,
                    const TargetTransformInfo *TTI)
      : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
        VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
    if (CondOp)
      addOperand(CondOp);
  }

  ~VPReductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionSC;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReductionSC;
  }

  /// Generate the reduction in the loop
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// The VPValue of the scalar Chain being accumulated.
  VPValue *getChainOp() const { return getOperand(0); }
  /// The VPValue of the vector value to be reduced.
  VPValue *getVecOp() const { return getOperand(1); }
  /// The VPValue of the condition for the block; nullptr if unconditional.
  VPValue *getCondOp() const {
    return getNumOperands() > 2 ? getOperand(2) : nullptr;
  }
};
1262
/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to be
/// uniform only one copy, per lane zero, will be generated.
class VPReplicateRecipe : public VPRecipeBase, public VPValue {
  /// Indicator if only a single replica per lane is needed.
  bool IsUniform;

  /// Indicator if the replicas are also predicated.
  bool IsPredicated;

  /// Indicator if the scalar values should also be packed into a vector.
  bool AlsoPack;

public:
  /// Construct a recipe replicating \p I with the given \p Operands. \p
  /// IsUniform requests a single replica per lane; \p IsPredicated marks the
  /// replicas as predicated.
  template <typename IterT>
  VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
                    bool IsUniform, bool IsPredicated = false)
      : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
        IsUniform(IsUniform), IsPredicated(IsPredicated) {
    // Retain the previous behavior of predicateInstructions(), where an
    // insert-element of a predicated instruction got hoisted into the
    // predicated basic block iff it was its only user. This is achieved by
    // having predicated instructions also pack their values into a vector by
    // default unless they have a replicated user which uses their scalar value.
    AlsoPack = IsPredicated && !I->use_empty();
  }

  ~VPReplicateRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReplicateSC;
  }

  /// Generate replicas of the desired Ingredient. Replicas will be generated
  /// for all parts and lanes unless a specific part and lane are specified in
  /// the \p State.
  void execute(VPTransformState &State) override;

  /// Override whether the scalar values are packed into a vector.
  void setAlsoPack(bool Pack) { AlsoPack = Pack; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if only a single replica per lane is generated.
  bool isUniform() const { return IsUniform; }

  /// Returns true if the scalar values are also packed into a vector.
  bool isPacked() const { return AlsoPack; }

  /// Returns true if the replicas are predicated.
  bool isPredicated() const { return IsPredicated; }
};
1321
/// A recipe for generating conditional branches on the bits of a mask.
class VPBranchOnMaskRecipe : public VPRecipeBase {
public:
  /// Construct a recipe branching on \p BlockInMask; a nullptr mask means an
  /// all-one mask and adds no operand.
  VPBranchOnMaskRecipe(VPValue *BlockInMask)
      : VPRecipeBase(VPBranchOnMaskSC, {}) {
    if (BlockInMask) // nullptr means all-one mask.
      addOperand(BlockInMask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
  }

  /// Generate the extraction of the appropriate bit from the block mask and the
  /// conditional branch.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override {
    O << Indent << "BRANCH-ON-MASK ";
    if (VPValue *Mask = getMask())
      Mask->printAsOperand(O, SlotTracker);
    else
      O << " All-One";
  }
#endif

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
    // Mask is optional.
    return getNumOperands() == 1 ? getOperand(0) : nullptr;
  }
};
1360
/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask. The phi nodes are needed in
/// order to merge values that are set under such a branch and feed their uses.
/// The phi nodes can be scalar or vector depending on the users of the value.
/// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
public:
  /// Construct a VPPredInstPHIRecipe given \p PredV, the predicated value
  /// whose uses need a phi after merging back from a Branch-on-Mask.
  VPPredInstPHIRecipe(VPValue *PredV)
      : VPRecipeBase(VPPredInstPHISC, PredV),
        VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
  ~VPPredInstPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
  }

  /// Generates phi nodes for live-outs as needed to retain SSA form.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1389
/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase {
  /// The load or store being widened.
  Instruction &Ingredient;

  /// Append \p Mask as the last operand; a nullptr mask adds nothing.
  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  /// Returns true if a mask operand was added: stores then have 3 operands,
  /// loads 2.
  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }

public:
  /// Construct a recipe widening \p Load from \p Addr, optionally masked by
  /// \p Mask (nullptr means unmasked).
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}), Ingredient(Load) {
    // Loads define a value for the loaded result.
    new VPValue(VPValue::VPVMemoryInstructionSC, &Load, this);
    setMask(Mask);
  }

  /// Construct a recipe widening \p Store of \p StoredValue to \p Addr,
  /// optionally masked by \p Mask (nullptr means unmasked).
  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
        Ingredient(Store) {
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1458
/// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase {
public:
  /// Construct the recipe; it takes no operands and defines a single VPValue
  /// (with no underlying IR value) for the widened canonical IV.
  VPWidenCanonicalIVRecipe() : VPRecipeBase(VPWidenCanonicalIVSC, {}) {
    new VPValue(nullptr, this);
  }

  ~VPWidenCanonicalIVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Generate a canonical vector induction variable of the vector loop, with
  /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
  /// step = <VF*UF, VF*UF, ..., VF*UF>.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1484
1485 /// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
1486 /// holds a sequence of zero or more VPRecipe's each representing a sequence of
1487 /// output IR instructions. All PHI-like recipes must come before any non-PHI recipes.
1488 class VPBasicBlock : public VPBlockBase {
1489 public:
1490 using RecipeListTy = iplist<VPRecipeBase>;
1491
1492 private:
1493 /// The VPRecipes held in the order of output instructions to generate.
1494 RecipeListTy Recipes;
1495
1496 public:
1497 VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
1498 : VPBlockBase(VPBasicBlockSC, Name.str()) {
1499 if (Recipe)
1500 appendRecipe(Recipe);
1501 }
1502
~VPBasicBlock()1503 ~VPBasicBlock() override {
1504 while (!Recipes.empty())
1505 Recipes.pop_back();
1506 }
1507
  /// Recipe list iterator types, forwarded from the underlying recipe ilist.
  using iterator = RecipeListTy::iterator;
  using const_iterator = RecipeListTy::const_iterator;
  using reverse_iterator = RecipeListTy::reverse_iterator;
  using const_reverse_iterator = RecipeListTy::const_reverse_iterator;

  //===--------------------------------------------------------------------===//
  /// Recipe iterator methods
  ///
  inline iterator begin() { return Recipes.begin(); }
  inline const_iterator begin() const { return Recipes.begin(); }
  inline iterator end() { return Recipes.end(); }
  inline const_iterator end() const { return Recipes.end(); }

  inline reverse_iterator rbegin() { return Recipes.rbegin(); }
  inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
  inline reverse_iterator rend() { return Recipes.rend(); }
  inline const_reverse_iterator rend() const { return Recipes.rend(); }

  inline size_t size() const { return Recipes.size(); }
  inline bool empty() const { return Recipes.empty(); }
  inline const VPRecipeBase &front() const { return Recipes.front(); }
  inline VPRecipeBase &front() { return Recipes.front(); }
  inline const VPRecipeBase &back() const { return Recipes.back(); }
  inline VPRecipeBase &back() { return Recipes.back(); }

  /// Returns a reference to the list of recipes.
  RecipeListTy &getRecipeList() { return Recipes; }

  /// Returns a pointer to a member of the recipe list.
  static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
    return &VPBasicBlock::Recipes;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
  }

  /// Insert \p Recipe into this block before \p InsertPt and make this block
  /// the recipe's parent. \p Recipe must not already belong to a block.
  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }

  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }
1557
  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  /// Returns an iterator range over the PHI-like recipes in the block.
  iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPhi());
  }

  /// Drop all references held by recipes in this block, replacing any uses
  /// with \p NewValue (a throw-away dummy during plan teardown).
  void dropAllReferences(VPValue *NewValue) override;

  /// Split current block at \p SplitAt by inserting a new block between the
  /// current block and its successors and moving all recipes starting at
  /// SplitAt to the new block. Returns the new block.
  VPBasicBlock *splitAt(iterator SplitAt);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
  /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif

private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
1592 };
1593
1594 /// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
1595 /// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
1596 /// A VPRegionBlock may indicate that its contents are to be replicated several
1597 /// times. This is designed to support predicated scalarization, in which a
1598 /// scalar if-then code structure needs to be generated VF * UF times. Having
1599 /// this replication indicator helps to keep a single model for multiple
1600 /// candidate VF's. The actual replication takes place only once the desired VF
1601 /// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Exit;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  /// Construct a region with known \p Entry and \p Exit blocks; both are
  /// re-parented to this region and must not yet be connected outside it.
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exit->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exit->setParent(this);
  }
  /// Construct an empty region; Entry and Exit must be set before use.
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
        IsReplicator(IsReplicator) {}

  /// Delete all blocks reachable from Entry, first dropping all their
  /// references so deletion order does not matter.
  ~VPRegionBlock() override {
    if (Entry) {
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraits::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExit() const { return Exit; }
  VPBlockBase *getExit() { return Exit; }

  /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
  /// ExitBlock must have no successors.
  void setExit(VPBlockBase *ExitBlock) {
    assert(ExitBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exit = ExitBlock;
    ExitBlock->setParent(this);
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Drop all references held by blocks inside the region, replacing any uses
  /// with \p NewValue (a throw-away dummy during plan teardown).
  void dropAllReferences(VPValue *NewValue) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
  /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual regions is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif
};
1692
1693 //===----------------------------------------------------------------------===//
1694 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs //
1695 //===----------------------------------------------------------------------===//
1696
1697 // The following set of template specializations implement GraphTraits to treat
1698 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
// that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
1700 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
1701 // successors/predecessors but not to the blocks inside the region.
1702
template <> struct GraphTraits<VPBlockBase *> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  // A block is its own entry node in this flat (non-recursive) view.
  static NodeRef getEntryNode(NodeRef N) { return N; }

  // Children are the block's direct successors only.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
1717
template <> struct GraphTraits<const VPBlockBase *> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;

  // Const counterpart of GraphTraits<VPBlockBase *>.
  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
1732
// Inverse order specialization for VPBlockBases. Predecessors are used instead
1734 // of successors for the inverse traversal.
template <> struct GraphTraits<Inverse<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }

  // For the inverse traversal, children are the block's predecessors.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getPredecessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getPredecessors().end();
  }
};
1749
1750 // The following set of template specializations implement GraphTraits to
1751 // treat VPRegionBlock as a graph and recurse inside its nodes. It's important
1752 // to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
1753 // (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
1754 // there won't be automatic recursion into other VPBlockBases that turn to be
1755 // VPRegionBlocks.
1756
template <>
struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  // A region, viewed as a graph, starts at its entry block.
  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1774
template <>
struct GraphTraits<const VPRegionBlock *>
    : public GraphTraits<const VPBlockBase *> {
  using GraphRef = const VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  // Const counterpart of GraphTraits<VPRegionBlock *>.
  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1793
template <>
struct GraphTraits<Inverse<VPRegionBlock *>>
    : public GraphTraits<Inverse<VPBlockBase *>> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  // The inverse traversal of a region starts at its exit block.
  static NodeRef getEntryNode(Inverse<GraphRef> N) {
    return N.Graph->getExit();
  }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getExit());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1814
1815 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
1816 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
1817 /// parent region's successors. This ensures all blocks in a region are visited
1818 /// before any blocks in a successor region when doing a reverse post-order
/// traversal of the graph.
1820 template <typename BlockPtrTy>
1821 class VPAllSuccessorsIterator
1822 : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
1823 std::forward_iterator_tag, VPBlockBase> {
1824 BlockPtrTy Block;
1825 /// Index of the current successor. For VPBasicBlock nodes, this simply is the
1826 /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
1827 /// used for the region's entry block, and SuccessorIdx - 1 are the indices
1828 /// for the successor array.
1829 size_t SuccessorIdx;
1830
1831 static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
1832 while (Current && Current->getNumSuccessors() == 0)
1833 Current = Current->getParent();
1834 return Current;
1835 }
1836
1837 /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
1838 /// both the const and non-const operator* implementations.
1839 template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
1840 if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
1841 if (SuccIdx == 0)
1842 return R->getEntry();
1843 SuccIdx--;
1844 }
1845
1846 // For exit blocks, use the next parent region with successors.
1847 return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
1848 }
1849
1850 public:
1851 VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
1852 : Block(Block), SuccessorIdx(Idx) {}
1853 VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
1854 : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
1855
1856 VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
1857 Block = R.Block;
1858 SuccessorIdx = R.SuccessorIdx;
1859 return *this;
1860 }
1861
1862 static VPAllSuccessorsIterator end(BlockPtrTy Block) {
1863 BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
1864 unsigned NumSuccessors = ParentWithSuccs
1865 ? ParentWithSuccs->getNumSuccessors()
1866 : Block->getNumSuccessors();
1867
1868 if (auto *R = dyn_cast<VPRegionBlock>(Block))
1869 return {R, NumSuccessors + 1};
1870 return {Block, NumSuccessors};
1871 }
1872
1873 bool operator==(const VPAllSuccessorsIterator &R) const {
1874 return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
1875 }
1876
1877 const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
1878
1879 BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
1880
1881 VPAllSuccessorsIterator &operator++() {
1882 SuccessorIdx++;
1883 return *this;
1884 }
1885
1886 VPAllSuccessorsIterator operator++(int X) {
1887 VPAllSuccessorsIterator Orig = *this;
1888 SuccessorIdx++;
1889 return Orig;
1890 }
1891 };
1892
/// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  /// Entry block of the graph to be traversed.
  BlockTy Entry;

public:
  VPBlockRecursiveTraversalWrapper(BlockTy Entry) : Entry(Entry) {}
  BlockTy getEntry() { return Entry; }
};
1901
1902 /// GraphTraits specialization to recursively traverse VPBlockBase nodes,
1903 /// including traversing through VPRegionBlocks. Exit blocks of a region
1904 /// implicitly have their parent region's successors. This ensures all blocks in
1905 /// a region are visited before any blocks in a successor region when doing a
1906 /// reverse post-order traversal of the graph.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  // VPAllSuccessorsIterator also yields region entry blocks, giving the
  // recursive (through-region) traversal.
  using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
    return N.getEntry();
  }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
1925
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
  using NodeRef = const VPBlockBase *;
  // Const counterpart of the recursive traversal specialization above.
  using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
    return N.getEntry();
  }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
1944
/// VPlan models a candidate for vectorization, encoding various decisions taken
1946 /// to produce efficient output IR, including which branches, basic-blocks and
1947 /// output IR instructions to generate, and their cost. VPlan holds a
1948 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
1949 /// VPBlock.
1950 class VPlan {
1951 friend class VPlanPrinter;
1952 friend class VPSlotTracker;
1953
1954 /// Hold the single entry to the Hierarchical CFG of the VPlan.
1955 VPBlockBase *Entry;
1956
1957 /// Holds the VFs applicable to this VPlan.
1958 SmallSetVector<ElementCount, 2> VFs;
1959
1960 /// Holds the name of the VPlan, for printing.
1961 std::string Name;
1962
1963 /// Holds all the external definitions created for this VPlan.
1964 // TODO: Introduce a specific representation for external definitions in
1965 // VPlan. External definitions must be immutable and hold a pointer to its
1966 // underlying IR that will be used to implement its structural comparison
1967 // (operators '==' and '<').
1968 SetVector<VPValue *> VPExternalDefs;
1969
1970 /// Represents the backedge taken count of the original loop, for folding
1971 /// the tail.
1972 VPValue *BackedgeTakenCount = nullptr;
1973
1974 /// Holds a mapping between Values and their corresponding VPValue inside
1975 /// VPlan.
1976 Value2VPValueTy Value2VPValue;
1977
1978 /// Contains all VPValues that been allocated by addVPValue directly and need
1979 /// to be free when the plan's destructor is called.
1980 SmallVector<VPValue *, 16> VPValuesToFree;
1981
1982 /// Holds the VPLoopInfo analysis for this VPlan.
1983 VPLoopInfo VPLInfo;
1984
1985 public:
1986 VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
1987 if (Entry)
1988 Entry->setPlan(this);
1989 }
1990
1991 ~VPlan() {
1992 if (Entry) {
1993 VPValue DummyValue;
1994 for (VPBlockBase *Block : depth_first(Entry))
1995 Block->dropAllReferences(&DummyValue);
1996
1997 VPBlockBase::deleteCFG(Entry);
1998 }
1999 for (VPValue *VPV : VPValuesToFree)
2000 delete VPV;
2001 if (BackedgeTakenCount)
2002 delete BackedgeTakenCount;
2003 for (VPValue *Def : VPExternalDefs)
2004 delete Def;
2005 }
2006
2007 /// Generate the IR code for this VPlan.
2008 void execute(struct VPTransformState *State);
2009
2010 VPBlockBase *getEntry() { return Entry; }
2011 const VPBlockBase *getEntry() const { return Entry; }
2012
2013 VPBlockBase *setEntry(VPBlockBase *Block) {
2014 Entry = Block;
2015 Block->setPlan(this);
2016 return Entry;
2017 }
2018
2019 /// The backedge taken count of the original loop.
2020 VPValue *getOrCreateBackedgeTakenCount() {
2021 if (!BackedgeTakenCount)
2022 BackedgeTakenCount = new VPValue();
2023 return BackedgeTakenCount;
2024 }
2025
2026 void addVF(ElementCount VF) { VFs.insert(VF); }
2027
2028 bool hasVF(ElementCount VF) { return VFs.count(VF); }
2029
2030 const std::string &getName() const { return Name; }
2031
2032 void setName(const Twine &newName) { Name = newName.str(); }
2033
2034 /// Add \p VPVal to the pool of external definitions if it's not already
2035 /// in the pool.
2036 void addExternalDef(VPValue *VPVal) { VPExternalDefs.insert(VPVal); }
2037
2038 void addVPValue(Value *V) {
2039 assert(V && "Trying to add a null Value to VPlan");
2040 assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2041 VPValue *VPV = new VPValue(V);
2042 Value2VPValue[V] = VPV;
2043 VPValuesToFree.push_back(VPV);
2044 }
2045
2046 void addVPValue(Value *V, VPValue *VPV) {
2047 assert(V && "Trying to add a null Value to VPlan");
2048 assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2049 Value2VPValue[V] = VPV;
2050 }
2051
2052 VPValue *getVPValue(Value *V) {
2053 assert(V && "Trying to get the VPValue of a null Value");
2054 assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2055 return Value2VPValue[V];
2056 }
2057
2058 VPValue *getOrAddVPValue(Value *V) {
2059 assert(V && "Trying to get or add the VPValue of a null Value");
2060 if (!Value2VPValue.count(V))
2061 addVPValue(V);
2062 return getVPValue(V);
2063 }
2064
2065 void removeVPValueFor(Value *V) { Value2VPValue.erase(V); }
2066
2067 /// Return the VPLoopInfo analysis for this VPlan.
2068 VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
2069 const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }
2070
2071 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2072 /// Print this VPlan to \p O.
2073 void print(raw_ostream &O) const;
2074
2075 /// Print this VPlan in DOT format to \p O.
2076 void printDOT(raw_ostream &O) const;
2077
2078 /// Dump the plan to stderr (for debugging).
2079 LLVM_DUMP_METHOD void dump() const;
2080 #endif
2081
2082 /// Returns a range mapping the values the range \p Operands to their
2083 /// corresponding VPValues.
2084 iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2085 mapToVPValues(User::op_range Operands) {
2086 std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2087 return getOrAddVPValue(Op);
2088 };
2089 return map_range(Operands, Fn);
2090 }
2091
2092 private:
2093 /// Add to the given dominator tree the header block and every new basic block
2094 /// that was created between it and the latch block, inclusive.
2095 static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2096 BasicBlock *LoopPreHeaderBB,
2097 BasicBlock *LoopExitBB);
2098 };
2099
2100 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2101 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2102 /// indented and follows the dot format.
2103 class VPlanPrinter {
2104 raw_ostream &OS;
2105 const VPlan &Plan;
2106 unsigned Depth = 0;
2107 unsigned TabWidth = 2;
2108 std::string Indent;
2109 unsigned BID = 0;
2110 SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2111
2112 VPSlotTracker SlotTracker;
2113
2114 /// Handle indentation.
2115 void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
2116
2117 /// Print a given \p Block of the Plan.
2118 void dumpBlock(const VPBlockBase *Block);
2119
2120 /// Print the information related to the CFG edges going out of a given
2121 /// \p Block, followed by printing the successor blocks themselves.
2122 void dumpEdges(const VPBlockBase *Block);
2123
2124 /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2125 /// its successor blocks.
2126 void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2127
2128 /// Print a given \p Region of the Plan.
2129 void dumpRegion(const VPRegionBlock *Region);
2130
2131 unsigned getOrCreateBID(const VPBlockBase *Block) {
2132 return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2133 }
2134
2135 const Twine getOrCreateName(const VPBlockBase *Block);
2136
2137 const Twine getUID(const VPBlockBase *Block);
2138
2139 /// Print the information related to a CFG edge between two VPBlockBases.
2140 void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2141 const Twine &Label);
2142
2143 public:
2144 VPlanPrinter(raw_ostream &O, const VPlan &P)
2145 : OS(O), Plan(P), SlotTracker(&P) {}
2146
2147 LLVM_DUMP_METHOD void dump();
2148 };
2149
/// Lightweight wrapper used to print an underlying IR value in VPlan output.
struct VPlanIngredient {
  /// The wrapped IR value; non-owning.
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  /// Print the wrapped value to \p O.
  void print(raw_ostream &O) const;
};
2157
/// Stream a VPlanIngredient by delegating to its print method.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}
2162
/// Stream a VPlan in plain-text form by delegating to VPlan::print.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2167 #endif
2168
2169 //===----------------------------------------------------------------------===//
2170 // VPlan Utilities
2171 //===----------------------------------------------------------------------===//
2172
2173 /// Class that provides utilities for VPBlockBases in VPlan.
class VPBlockUtils {
public:
  VPBlockUtils() = delete;

  /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
  /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
  /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. If \p BlockPtr
  /// has more than one successor, its conditional bit is propagated to \p
  /// NewBlock. \p NewBlock must have neither successors nor predecessors.
  static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
    assert(NewBlock->getSuccessors().empty() &&
           "Can't insert new block with successors.");
    // TODO: move successors from BlockPtr to NewBlock when this functionality
    // is necessary. For now, setBlockSingleSuccessor will assert if BlockPtr
    // already has successors.
    BlockPtr->setOneSuccessor(NewBlock);
    NewBlock->setPredecessors({BlockPtr});
    NewBlock->setParent(BlockPtr->getParent());
  }

  /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
  /// BlockPtr. Add \p IfTrue and \p IfFalse as successors of \p BlockPtr and \p
  /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
  /// parent to \p IfTrue and \p IfFalse. \p Condition is set as the successor
  /// selector. \p BlockPtr must have no successors and \p IfTrue and \p IfFalse
  /// must have neither successors nor predecessors.
  static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
                                   VPValue *Condition, VPBlockBase *BlockPtr) {
    assert(IfTrue->getSuccessors().empty() &&
           "Can't insert IfTrue with successors.");
    assert(IfFalse->getSuccessors().empty() &&
           "Can't insert IfFalse with successors.");
    BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
    IfTrue->setPredecessors({BlockPtr});
    IfFalse->setPredecessors({BlockPtr});
    IfTrue->setParent(BlockPtr->getParent());
    IfFalse->setParent(BlockPtr->getParent());
  }

  /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
  /// the successors of \p From and \p From to the predecessors of \p To. Both
  /// VPBlockBases must have the same parent, which can be null. Both
  /// VPBlockBases can be already connected to other VPBlockBases.
  static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert((From->getParent() == To->getParent()) &&
           "Can't connect two block with different parents");
    assert(From->getNumSuccessors() < 2 &&
           "Blocks can't have more than two successors.");
    From->appendSuccessor(To);
    To->appendPredecessor(From);
  }

  /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
  /// from the successors of \p From and \p From from the predecessors of \p To.
  static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert(To && "Successor to disconnect is null.");
    From->removeSuccessor(To);
    To->removePredecessor(From);
  }

  /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge.
  static bool isBackEdge(const VPBlockBase *FromBlock,
                         const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
    assert(FromBlock->getParent() == ToBlock->getParent() &&
           FromBlock->getParent() && "Must be in same region");
    const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
    const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
    // Blocks in different loops (or outside any loop) cannot form a back-edge.
    if (!FromLoop || !ToLoop || FromLoop != ToLoop)
      return false;

    // A back-edge is a branch from the loop latch to its header.
    return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
  }

  /// Returns true if \p Block is a loop latch
  static bool blockIsLoopLatch(const VPBlockBase *Block,
                               const VPLoopInfo *VPLInfo) {
    if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
      return ParentVPL->isLoopLatch(Block);

    return false;
  }

  /// Count and return the number of successors of \p PredBlock excluding any
  /// backedges.
  static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
                                      VPLoopInfo *VPLI) {
    unsigned Count = 0;
    for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
      if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
        Count++;
    }
    return Count;
  }

  /// Return an iterator range over \p Range which only includes \p BlockTy
  /// blocks. The accesses are casted to \p BlockTy.
  template <typename BlockTy, typename T>
  static auto blocksOnly(const T &Range) {
    // Create BaseTy with correct const-ness based on BlockTy.
    using BaseTy =
        typename std::conditional<std::is_const<BlockTy>::value,
                                  const VPBlockBase, VPBlockBase>::type;

    // We need to first create an iterator range over (const) BlockTy & instead
    // of (const) BlockTy * for filter_range to work properly.
    auto Mapped =
        map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
    auto Filter = make_filter_range(
        Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
    return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
      return cast<BlockTy>(&Block);
    });
  }
};
2289
class VPInterleavedAccessInfo {
  /// Maps each VPInstruction to the VPlan-level interleave group it belongs to.
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  /// Delete all owned interleave groups. Several instructions may map to the
  /// same group, so the groups are first collected into a set.
  ~VPInterleavedAccessInfo() {
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};
2328
2329 /// Class that maps (parts of) an existing VPlan to trees of combined
2330 /// VPInstructions.
2331 class VPlanSlp {
2332 enum class OpMode { Failed, Load, Opcode };
2333
2334 /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
2335 /// DenseMap keys.
  struct BundleDenseMapInfo {
    /// Sentinel bundle marking empty DenseMap buckets.
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    /// Sentinel bundle marking erased DenseMap buckets.
    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    /// Hash a bundle by combining the hashes of its element pointers.
    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    /// Bundles are equal iff they hold the same pointers in the same order.
    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };
2354
2355 /// Mapping of values in the original VPlan to a combined VPInstruction.
2356 DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
2357 BundleToCombined;
2358
2359 VPInterleavedAccessInfo &IAI;
2360
2361 /// Basic block to operate on. For now, only instructions in a single BB are
2362 /// considered.
2363 const VPBasicBlock &BB;
2364
2365 /// Indicates whether we managed to combine all visited instructions or not.
2366 bool CompletelySLP = true;
2367
2368 /// Width of the widest combined bundle in bits.
2369 unsigned WidestBundleBits = 0;
2370
2371 using MultiNodeOpTy =
2372 typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;
2373
2374 // Input operand bundles for the current multi node. Each multi node operand
2375 // bundle contains values not matching the multi node's opcode. They will
2376 // be reordered in reorderMultiNodeOps, once we completed building a
2377 // multi node.
2378 SmallVector<MultiNodeOpTy, 4> MultiNodeOps;
2379
2380 /// Indicates whether we are building a multi node currently.
2381 bool MultiNodeActive = false;
2382
2383 /// Check if we can vectorize Operands together.
2384 bool areVectorizable(ArrayRef<VPValue *> Operands) const;
2385
2386 /// Add combined instruction \p New for the bundle \p Operands.
2387 void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);
2388
2389 /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
2390 VPInstruction *markFailed();
2391
2392 /// Reorder operands in the multi node to maximize sequential memory access
2393 /// and commutative operations.
2394 SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();
2395
2396 /// Choose the best candidate to use for the lane after \p Last. The set of
2397 /// candidates to choose from are values with an opcode matching \p Last's
2398 /// or loads consecutive to \p Last.
2399 std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
2400 SmallPtrSetImpl<VPValue *> &Candidates,
2401 VPInterleavedAccessInfo &IAI);
2402
2403 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2404 /// Print bundle \p Values to dbgs().
2405 void dumpBundle(ArrayRef<VPValue *> Values);
2406 #endif
2407
2408 public:
2409 VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}
2410
2411 ~VPlanSlp() = default;
2412
2413 /// Tries to build an SLP tree rooted at \p Operands and returns a
2414 /// VPInstruction combining \p Operands, if they can be combined.
2415 VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);
2416
2417 /// Return the width of the widest combined bundle in bits.
2418 unsigned getWidestBundleBits() const { return WidestBundleBits; }
2419
2420 /// Return true if all visited instruction can be combined.
2421 bool isCompletelySLP() const { return CompletelySLP; }
2422 };
2423 } // end namespace llvm
2424
2425 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
2426