//===- llvm/Transforms/Vectorize/LoopVectorizationLegality.h ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines the LoopVectorizationLegality class. Original code
/// in Loop Vectorizer has been moved out to its own file for modularity
/// and reusability.
///
/// Currently, it works for innermost loop vectorization. Extending this to
/// outer loop vectorization is a TODO item.
///
/// Also provides:
/// 1) LoopVectorizeHints class which keeps a number of loop annotations
/// locally for easy look up. It has the ability to write them back as
/// loop metadata, upon request.
/// 2) LoopVectorizationRequirements class for lazy bail out for the purpose
/// of reporting useful failure to vectorize message.
//
//===----------------------------------------------------------------------===//
25 
26 #ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
27 #define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
28 
29 #include "llvm/ADT/MapVector.h"
30 #include "llvm/Analysis/LoopAccessAnalysis.h"
31 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
32 #include "llvm/Support/TypeSize.h"
33 #include "llvm/Transforms/Utils/LoopUtils.h"
34 
35 namespace llvm {
36 
37 /// Utility class for getting and setting loop vectorizer hints in the form
38 /// of loop metadata.
39 /// This class keeps a number of loop annotations locally (as member variables)
40 /// and can, upon request, write them back as metadata on the loop. It will
41 /// initially scan the loop for existing metadata, and will update the local
42 /// values based on information in the loop.
43 /// We cannot write all values to metadata, as the mere presence of some info,
44 /// for example 'force', means a decision has been made. So, we need to be
45 /// careful NOT to add them if the user hasn't specifically asked so.
46 class LoopVectorizeHints {
47   enum HintKind {
48     HK_WIDTH,
49     HK_INTERLEAVE,
50     HK_FORCE,
51     HK_ISVECTORIZED,
52     HK_PREDICATE,
53     HK_SCALABLE
54   };
55 
56   /// Hint - associates name and validation with the hint value.
57   struct Hint {
58     const char *Name;
59     unsigned Value; // This may have to change for non-numeric values.
60     HintKind Kind;
61 
HintHint62     Hint(const char *Name, unsigned Value, HintKind Kind)
63         : Name(Name), Value(Value), Kind(Kind) {}
64 
65     bool validate(unsigned Val);
66   };
67 
68   /// Vectorization width.
69   Hint Width;
70 
71   /// Vectorization interleave factor.
72   Hint Interleave;
73 
74   /// Vectorization forced
75   Hint Force;
76 
77   /// Already Vectorized
78   Hint IsVectorized;
79 
80   /// Vector Predicate
81   Hint Predicate;
82 
83   /// Says whether we should use fixed width or scalable vectorization.
84   Hint Scalable;
85 
86   /// Return the loop metadata prefix.
Prefix()87   static StringRef Prefix() { return "llvm.loop."; }
88 
89   /// True if there is any unsafe math in the loop.
90   bool PotentiallyUnsafe = false;
91 
92 public:
93   enum ForceKind {
94     FK_Undefined = -1, ///< Not selected.
95     FK_Disabled = 0,   ///< Forcing disabled.
96     FK_Enabled = 1,    ///< Forcing enabled.
97   };
98 
99   enum ScalableForceKind {
100     /// Not selected.
101     SK_Unspecified = -1,
102     /// Disables vectorization with scalable vectors.
103     SK_FixedWidthOnly = 0,
104     /// Vectorize loops using scalable vectors or fixed-width vectors, but favor
105     /// scalable vectors when the cost-model is inconclusive. This is the
106     /// default when the scalable.enable hint is enabled through a pragma.
107     SK_PreferScalable = 1,
108     /// Vectorize loops using scalable vectors or fixed-width  vectors, but
109     /// favor fixed-width vectors when the cost is inconclusive.
110     SK_PreferFixedWidth = 2,
111   };
112 
113   LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
114                      OptimizationRemarkEmitter &ORE);
115 
116   /// Mark the loop L as already vectorized by setting the width to 1.
117   void setAlreadyVectorized();
118 
119   bool allowVectorization(Function *F, Loop *L,
120                           bool VectorizeOnlyWhenForced) const;
121 
122   /// Dumps all the hint information.
123   void emitRemarkWithHints() const;
124 
getWidth()125   ElementCount getWidth() const {
126     return ElementCount::get(Width.Value,
127                              isScalableVectorizationExplicitlyEnabled());
128   }
getInterleave()129   unsigned getInterleave() const {
130     if (Interleave.Value)
131       return Interleave.Value;
132     // If interleaving is not explicitly set, assume that if we do not want
133     // unrolling, we also don't want any interleaving.
134     if (llvm::hasUnrollTransformation(TheLoop) & TM_Disable)
135       return 1;
136     return 0;
137   }
getIsVectorized()138   unsigned getIsVectorized() const { return IsVectorized.Value; }
getPredicate()139   unsigned getPredicate() const { return Predicate.Value; }
getForce()140   enum ForceKind getForce() const {
141     if ((ForceKind)Force.Value == FK_Undefined &&
142         hasDisableAllTransformsHint(TheLoop))
143       return FK_Disabled;
144     return (ForceKind)Force.Value;
145   }
146 
147   /// \return true if the cost-model for scalable vectorization should
148   /// favor vectorization with scalable vectors over fixed-width vectors when
149   /// the cost-model is inconclusive.
isScalableVectorizationPreferred()150   bool isScalableVectorizationPreferred() const {
151     return Scalable.Value == SK_PreferScalable;
152   }
153 
154   /// \return true if scalable vectorization has been explicitly enabled.
isScalableVectorizationExplicitlyEnabled()155   bool isScalableVectorizationExplicitlyEnabled() const {
156     return Scalable.Value == SK_PreferFixedWidth ||
157            Scalable.Value == SK_PreferScalable;
158   }
159 
160   /// \return true if scalable vectorization has been explicitly disabled.
isScalableVectorizationDisabled()161   bool isScalableVectorizationDisabled() const {
162     return Scalable.Value == SK_FixedWidthOnly;
163   }
164 
165   /// If hints are provided that force vectorization, use the AlwaysPrint
166   /// pass name to force the frontend to print the diagnostic.
167   const char *vectorizeAnalysisPassName() const;
168 
169   /// When enabling loop hints are provided we allow the vectorizer to change
170   /// the order of operations that is given by the scalar loop. This is not
171   /// enabled by default because can be unsafe or inefficient. For example,
172   /// reordering floating-point operations will change the way round-off
173   /// error accumulates in the loop.
174   bool allowReordering() const;
175 
isPotentiallyUnsafe()176   bool isPotentiallyUnsafe() const {
177     // Avoid FP vectorization if the target is unsure about proper support.
178     // This may be related to the SIMD unit in the target not handling
179     // IEEE 754 FP ops properly, or bad single-to-double promotions.
180     // Otherwise, a sequence of vectorized loops, even without reduction,
181     // could lead to different end results on the destination vectors.
182     return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
183   }
184 
setPotentiallyUnsafe()185   void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
186 
187 private:
188   /// Find hints specified in the loop metadata and update local values.
189   void getHintsFromMetadata();
190 
191   /// Checks string hint with one operand and set value if valid.
192   void setHint(StringRef Name, Metadata *Arg);
193 
194   /// The loop these hints belong to.
195   const Loop *TheLoop;
196 
197   /// Interface to emit optimization remarks.
198   OptimizationRemarkEmitter &ORE;
199 };
200 
201 /// This holds vectorization requirements that must be verified late in
202 /// the process. The requirements are set by legalize and costmodel. Once
203 /// vectorization has been determined to be possible and profitable the
204 /// requirements can be verified by looking for metadata or compiler options.
205 /// For example, some loops require FP commutativity which is only allowed if
206 /// vectorization is explicitly specified or if the fast-math compiler option
207 /// has been provided.
208 /// Late evaluation of these requirements allows helpful diagnostics to be
209 /// composed that tells the user what need to be done to vectorize the loop. For
210 /// example, by specifying #pragma clang loop vectorize or -ffast-math. Late
211 /// evaluation should be used only when diagnostics can generated that can be
212 /// followed by a non-expert user.
213 class LoopVectorizationRequirements {
214 public:
215   /// Track the 1st floating-point instruction that can not be reassociated.
addExactFPMathInst(Instruction * I)216   void addExactFPMathInst(Instruction *I) {
217     if (I && !ExactFPMathInst)
218       ExactFPMathInst = I;
219   }
220 
addRuntimePointerChecks(unsigned Num)221   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
222 
223 
getExactFPInst()224   Instruction *getExactFPInst() { return ExactFPMathInst; }
225 
getNumRuntimePointerChecks()226   unsigned getNumRuntimePointerChecks() const {
227     return NumRuntimePointerChecks;
228   }
229 
230 private:
231   unsigned NumRuntimePointerChecks = 0;
232   Instruction *ExactFPMathInst = nullptr;
233 };
234 
235 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
236 /// to what vectorization factor.
237 /// This class does not look at the profitability of vectorization, only the
238 /// legality. This class has two main kinds of checks:
239 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
240 ///   will change the order of memory accesses in a way that will change the
241 ///   correctness of the program.
242 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
243 /// checks for a number of different conditions, such as the availability of a
244 /// single induction variable, that all types are supported and vectorize-able,
245 /// etc. This code reflects the capabilities of InnerLoopVectorizer.
246 /// This class is also used by InnerLoopVectorizer for identifying
247 /// induction variable and the different reduction variables.
248 class LoopVectorizationLegality {
249 public:
LoopVectorizationLegality(Loop * L,PredicatedScalarEvolution & PSE,DominatorTree * DT,TargetTransformInfo * TTI,TargetLibraryInfo * TLI,AAResults * AA,Function * F,std::function<const LoopAccessInfo & (Loop &)> * GetLAA,LoopInfo * LI,OptimizationRemarkEmitter * ORE,LoopVectorizationRequirements * R,LoopVectorizeHints * H,DemandedBits * DB,AssumptionCache * AC,BlockFrequencyInfo * BFI,ProfileSummaryInfo * PSI)250   LoopVectorizationLegality(
251       Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
252       TargetTransformInfo *TTI, TargetLibraryInfo *TLI, AAResults *AA,
253       Function *F, std::function<const LoopAccessInfo &(Loop &)> *GetLAA,
254       LoopInfo *LI, OptimizationRemarkEmitter *ORE,
255       LoopVectorizationRequirements *R, LoopVectorizeHints *H, DemandedBits *DB,
256       AssumptionCache *AC, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
257       : TheLoop(L), LI(LI), PSE(PSE), TTI(TTI), TLI(TLI), DT(DT),
258         GetLAA(GetLAA), ORE(ORE), Requirements(R), Hints(H), DB(DB), AC(AC),
259         BFI(BFI), PSI(PSI) {}
260 
261   /// ReductionList contains the reduction descriptors for all
262   /// of the reductions that were found in the loop.
263   using ReductionList = MapVector<PHINode *, RecurrenceDescriptor>;
264 
265   /// InductionList saves induction variables and maps them to the
266   /// induction descriptor.
267   using InductionList = MapVector<PHINode *, InductionDescriptor>;
268 
269   /// RecurrenceSet contains the phi nodes that are recurrences other than
270   /// inductions and reductions.
271   using RecurrenceSet = SmallPtrSet<const PHINode *, 8>;
272 
273   /// Returns true if it is legal to vectorize this loop.
274   /// This does not mean that it is profitable to vectorize this
275   /// loop, only that it is legal to do so.
276   /// Temporarily taking UseVPlanNativePath parameter. If true, take
277   /// the new code path being implemented for outer loop vectorization
278   /// (should be functional for inner loop vectorization) based on VPlan.
279   /// If false, good old LV code.
280   bool canVectorize(bool UseVPlanNativePath);
281 
282   /// Returns true if it is legal to vectorize the FP math operations in this
283   /// loop. Vectorizing is legal if we allow reordering of FP operations, or if
284   /// we can use in-order reductions.
285   bool canVectorizeFPMath(bool EnableStrictReductions);
286 
287   /// Return true if we can vectorize this loop while folding its tail by
288   /// masking, and mark all respective loads/stores for masking.
289   /// This object's state is only modified iff this function returns true.
290   bool prepareToFoldTailByMasking();
291 
292   /// Returns the primary induction variable.
getPrimaryInduction()293   PHINode *getPrimaryInduction() { return PrimaryInduction; }
294 
295   /// Returns the reduction variables found in the loop.
getReductionVars()296   ReductionList &getReductionVars() { return Reductions; }
297 
298   /// Returns the induction variables found in the loop.
getInductionVars()299   InductionList &getInductionVars() { return Inductions; }
300 
301   /// Return the first-order recurrences found in the loop.
getFirstOrderRecurrences()302   RecurrenceSet &getFirstOrderRecurrences() { return FirstOrderRecurrences; }
303 
304   /// Return the set of instructions to sink to handle first-order recurrences.
getSinkAfter()305   MapVector<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; }
306 
307   /// Returns the widest induction type.
getWidestInductionType()308   Type *getWidestInductionType() { return WidestIndTy; }
309 
310   /// Returns True if V is a Phi node of an induction variable in this loop.
311   bool isInductionPhi(const Value *V);
312 
313   /// Returns True if V is a cast that is part of an induction def-use chain,
314   /// and had been proven to be redundant under a runtime guard (in other
315   /// words, the cast has the same SCEV expression as the induction phi).
316   bool isCastedInductionVariable(const Value *V);
317 
318   /// Returns True if V can be considered as an induction variable in this
319   /// loop. V can be the induction phi, or some redundant cast in the def-use
320   /// chain of the inducion phi.
321   bool isInductionVariable(const Value *V);
322 
323   /// Returns True if PN is a reduction variable in this loop.
isReductionVariable(PHINode * PN)324   bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
325 
326   /// Returns True if Phi is a first-order recurrence in this loop.
327   bool isFirstOrderRecurrence(const PHINode *Phi);
328 
329   /// Return true if the block BB needs to be predicated in order for the loop
330   /// to be vectorized.
331   bool blockNeedsPredication(BasicBlock *BB) const;
332 
333   /// Check if this pointer is consecutive when vectorizing. This happens
334   /// when the last index of the GEP is the induction variable, or that the
335   /// pointer itself is an induction variable.
336   /// This check allows us to vectorize A[idx] into a wide load/store.
337   /// Returns:
338   /// 0 - Stride is unknown or non-consecutive.
339   /// 1 - Address is consecutive.
340   /// -1 - Address is consecutive, and decreasing.
341   /// NOTE: This method must only be used before modifying the original scalar
342   /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
343   int isConsecutivePtr(Value *Ptr) const;
344 
345   /// Returns true if the value V is uniform within the loop.
346   bool isUniform(Value *V);
347 
348   /// A uniform memory op is a load or store which accesses the same memory
349   /// location on all lanes.
isUniformMemOp(Instruction & I)350   bool isUniformMemOp(Instruction &I) {
351     Value *Ptr = getLoadStorePointerOperand(&I);
352     if (!Ptr)
353       return false;
354     // Note: There's nothing inherent which prevents predicated loads and
355     // stores from being uniform.  The current lowering simply doesn't handle
356     // it; in particular, the cost model distinguishes scatter/gather from
357     // scalar w/predication, and we currently rely on the scalar path.
358     return isUniform(Ptr) && !blockNeedsPredication(I.getParent());
359   }
360 
361   /// Returns the information that we collected about runtime memory check.
getRuntimePointerChecking()362   const RuntimePointerChecking *getRuntimePointerChecking() const {
363     return LAI->getRuntimePointerChecking();
364   }
365 
getLAI()366   const LoopAccessInfo *getLAI() const { return LAI; }
367 
isSafeForAnyVectorWidth()368   bool isSafeForAnyVectorWidth() const {
369     return LAI->getDepChecker().isSafeForAnyVectorWidth();
370   }
371 
getMaxSafeDepDistBytes()372   unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
373 
getMaxSafeVectorWidthInBits()374   uint64_t getMaxSafeVectorWidthInBits() const {
375     return LAI->getDepChecker().getMaxSafeVectorWidthInBits();
376   }
377 
hasStride(Value * V)378   bool hasStride(Value *V) { return LAI->hasStride(V); }
379 
380   /// Returns true if vector representation of the instruction \p I
381   /// requires mask.
isMaskRequired(const Instruction * I)382   bool isMaskRequired(const Instruction *I) const {
383     return MaskedOp.contains(I);
384   }
385 
getNumStores()386   unsigned getNumStores() const { return LAI->getNumStores(); }
getNumLoads()387   unsigned getNumLoads() const { return LAI->getNumLoads(); }
388 
389   /// Returns all assume calls in predicated blocks. They need to be dropped
390   /// when flattening the CFG.
getConditionalAssumes()391   const SmallPtrSetImpl<Instruction *> &getConditionalAssumes() const {
392     return ConditionalAssumes;
393   }
394 
395 private:
396   /// Return true if the pre-header, exiting and latch blocks of \p Lp and all
397   /// its nested loops are considered legal for vectorization. These legal
398   /// checks are common for inner and outer loop vectorization.
399   /// Temporarily taking UseVPlanNativePath parameter. If true, take
400   /// the new code path being implemented for outer loop vectorization
401   /// (should be functional for inner loop vectorization) based on VPlan.
402   /// If false, good old LV code.
403   bool canVectorizeLoopNestCFG(Loop *Lp, bool UseVPlanNativePath);
404 
405   /// Set up outer loop inductions by checking Phis in outer loop header for
406   /// supported inductions (int inductions). Return false if any of these Phis
407   /// is not a supported induction or if we fail to find an induction.
408   bool setupOuterLoopInductions();
409 
410   /// Return true if the pre-header, exiting and latch blocks of \p Lp
411   /// (non-recursive) are considered legal for vectorization.
412   /// Temporarily taking UseVPlanNativePath parameter. If true, take
413   /// the new code path being implemented for outer loop vectorization
414   /// (should be functional for inner loop vectorization) based on VPlan.
415   /// If false, good old LV code.
416   bool canVectorizeLoopCFG(Loop *Lp, bool UseVPlanNativePath);
417 
418   /// Check if a single basic block loop is vectorizable.
419   /// At this point we know that this is a loop with a constant trip count
420   /// and we only need to check individual instructions.
421   bool canVectorizeInstrs();
422 
423   /// When we vectorize loops we may change the order in which
424   /// we read and write from memory. This method checks if it is
425   /// legal to vectorize the code, considering only memory constrains.
426   /// Returns true if the loop is vectorizable
427   bool canVectorizeMemory();
428 
429   /// Return true if we can vectorize this loop using the IF-conversion
430   /// transformation.
431   bool canVectorizeWithIfConvert();
432 
433   /// Return true if we can vectorize this outer loop. The method performs
434   /// specific checks for outer loop vectorization.
435   bool canVectorizeOuterLoop();
436 
437   /// Return true if all of the instructions in the block can be speculatively
438   /// executed, and record the loads/stores that require masking.
439   /// \p SafePtrs is a list of addresses that are known to be legal and we know
440   /// that we can read from them without segfault.
441   /// \p MaskedOp is a list of instructions that have to be transformed into
442   /// calls to the appropriate masked intrinsic when the loop is vectorized.
443   /// \p ConditionalAssumes is a list of assume instructions in predicated
444   /// blocks that must be dropped if the CFG gets flattened.
445   bool blockCanBePredicated(
446       BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
447       SmallPtrSetImpl<const Instruction *> &MaskedOp,
448       SmallPtrSetImpl<Instruction *> &ConditionalAssumes) const;
449 
450   /// Updates the vectorization state by adding \p Phi to the inductions list.
451   /// This can set \p Phi as the main induction of the loop if \p Phi is a
452   /// better choice for the main induction than the existing one.
453   void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
454                        SmallPtrSetImpl<Value *> &AllowedExit);
455 
456   /// If an access has a symbolic strides, this maps the pointer value to
457   /// the stride symbol.
getSymbolicStrides()458   const ValueToValueMap *getSymbolicStrides() const {
459     // FIXME: Currently, the set of symbolic strides is sometimes queried before
460     // it's collected.  This happens from canVectorizeWithIfConvert, when the
461     // pointer is checked to reference consecutive elements suitable for a
462     // masked access.
463     return LAI ? &LAI->getSymbolicStrides() : nullptr;
464   }
465 
466   /// The loop that we evaluate.
467   Loop *TheLoop;
468 
469   /// Loop Info analysis.
470   LoopInfo *LI;
471 
472   /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
473   /// Applies dynamic knowledge to simplify SCEV expressions in the context
474   /// of existing SCEV assumptions. The analysis will also add a minimal set
475   /// of new predicates if this is required to enable vectorization and
476   /// unrolling.
477   PredicatedScalarEvolution &PSE;
478 
479   /// Target Transform Info.
480   TargetTransformInfo *TTI;
481 
482   /// Target Library Info.
483   TargetLibraryInfo *TLI;
484 
485   /// Dominator Tree.
486   DominatorTree *DT;
487 
488   // LoopAccess analysis.
489   std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
490 
491   // And the loop-accesses info corresponding to this loop.  This pointer is
492   // null until canVectorizeMemory sets it up.
493   const LoopAccessInfo *LAI = nullptr;
494 
495   /// Interface to emit optimization remarks.
496   OptimizationRemarkEmitter *ORE;
497 
498   //  ---  vectorization state --- //
499 
500   /// Holds the primary induction variable. This is the counter of the
501   /// loop.
502   PHINode *PrimaryInduction = nullptr;
503 
504   /// Holds the reduction variables.
505   ReductionList Reductions;
506 
507   /// Holds all of the induction variables that we found in the loop.
508   /// Notice that inductions don't need to start at zero and that induction
509   /// variables can be pointers.
510   InductionList Inductions;
511 
512   /// Holds all the casts that participate in the update chain of the induction
513   /// variables, and that have been proven to be redundant (possibly under a
514   /// runtime guard). These casts can be ignored when creating the vectorized
515   /// loop body.
516   SmallPtrSet<Instruction *, 4> InductionCastsToIgnore;
517 
518   /// Holds the phi nodes that are first-order recurrences.
519   RecurrenceSet FirstOrderRecurrences;
520 
521   /// Holds instructions that need to sink past other instructions to handle
522   /// first-order recurrences.
523   MapVector<Instruction *, Instruction *> SinkAfter;
524 
525   /// Holds the widest induction type encountered.
526   Type *WidestIndTy = nullptr;
527 
528   /// Allowed outside users. This holds the variables that can be accessed from
529   /// outside the loop.
530   SmallPtrSet<Value *, 4> AllowedExit;
531 
532   /// Vectorization requirements that will go through late-evaluation.
533   LoopVectorizationRequirements *Requirements;
534 
535   /// Used to emit an analysis of any legality issues.
536   LoopVectorizeHints *Hints;
537 
538   /// The demanded bits analysis is used to compute the minimum type size in
539   /// which a reduction can be computed.
540   DemandedBits *DB;
541 
542   /// The assumption cache analysis is used to compute the minimum type size in
543   /// which a reduction can be computed.
544   AssumptionCache *AC;
545 
546   /// While vectorizing these instructions we have to generate a
547   /// call to the appropriate masked intrinsic
548   SmallPtrSet<const Instruction *, 8> MaskedOp;
549 
550   /// Assume instructions in predicated blocks must be dropped if the CFG gets
551   /// flattened.
552   SmallPtrSet<Instruction *, 8> ConditionalAssumes;
553 
554   /// BFI and PSI are used to check for profile guided size optimizations.
555   BlockFrequencyInfo *BFI;
556   ProfileSummaryInfo *PSI;
557 };
558 
559 } // namespace llvm
560 
561 #endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
562