1 //===- Attributor.h --- Module-wide attribute deduction ---------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Attributor: An inter procedural (abstract) "attribute" deduction framework.
10 //
11 // The Attributor framework is an inter procedural abstract analysis (fixpoint
12 // iteration analysis). The goal is to allow easy deduction of new attributes as
13 // well as information exchange between abstract attributes in-flight.
14 //
15 // The Attributor class is the driver and the link between the various abstract
16 // attributes. The Attributor will iterate until a fixpoint state is reached by
17 // all abstract attributes in-flight, or until it will enforce a pessimistic fix
18 // point because an iteration limit is reached.
19 //
20 // Abstract attributes, derived from the AbstractAttribute class, actually
21 // describe properties of the code. They can correspond to actual LLVM-IR
22 // attributes, or they can be more general, ultimately unrelated to LLVM-IR
// attributes. The latter is useful when an abstract attribute provides
24 // information to other abstract attributes in-flight but we might not want to
25 // manifest the information. The Attributor allows to query in-flight abstract
26 // attributes through the `Attributor::getAAFor` method (see the method
27 // description for an example). If the method is used by an abstract attribute
28 // P, and it results in an abstract attribute Q, the Attributor will
29 // automatically capture a potential dependence from Q to P. This dependence
30 // will cause P to be reevaluated whenever Q changes in the future.
31 //
32 // The Attributor will only reevaluate abstract attributes that might have
// changed since the last iteration. That means that the Attributor will not
34 // revisit all instructions/blocks/functions in the module but only query
35 // an update from a subset of the abstract attributes.
36 //
37 // The update method `AbstractAttribute::updateImpl` is implemented by the
38 // specific "abstract attribute" subclasses. The method is invoked whenever the
39 // currently assumed state (see the AbstractState class) might not be valid
40 // anymore. This can, for example, happen if the state was dependent on another
41 // abstract attribute that changed. In every invocation, the update method has
42 // to adjust the internal state of an abstract attribute to a point that is
43 // justifiable by the underlying IR and the current state of abstract attributes
44 // in-flight. Since the IR is given and assumed to be valid, the information
45 // derived from it can be assumed to hold. However, information derived from
46 // other abstract attributes is conditional on various things. If the justifying
47 // state changed, the `updateImpl` has to revisit the situation and potentially
48 // find another justification or limit the optimistic assumes made.
49 //
50 // Change is the key in this framework. Until a state of no-change, thus a
51 // fixpoint, is reached, the Attributor will query the abstract attributes
52 // in-flight to re-evaluate their state. If the (current) state is too
53 // optimistic, hence it cannot be justified anymore through other abstract
54 // attributes or the state of the IR, the state of the abstract attribute will
55 // have to change. Generally, we assume abstract attribute state to be a finite
56 // height lattice and the update function to be monotone. However, these
57 // conditions are not enforced because the iteration limit will guarantee
58 // termination. If an optimistic fixpoint is reached, or a pessimistic fix
59 // point is enforced after a timeout, the abstract attributes are tasked to
60 // manifest their result in the IR for passes to come.
61 //
62 // Attribute manifestation is not mandatory. If desired, there is support to
63 // generate a single or multiple LLVM-IR attributes already in the helper struct
64 // IRAttribute. In the simplest case, a subclass inherits from IRAttribute with
65 // a proper Attribute::AttrKind as template parameter. The Attributor
66 // manifestation framework will then create and place a new attribute if it is
67 // allowed to do so (based on the abstract state). Other use cases can be
68 // achieved by overloading AbstractAttribute or IRAttribute methods.
69 //
70 //
71 // The "mechanics" of adding a new "abstract attribute":
72 // - Define a class (transitively) inheriting from AbstractAttribute and one
73 //   (which could be the same) that (transitively) inherits from AbstractState.
74 //   For the latter, consider the already available BooleanState and
75 //   {Inc,Dec,Bit}IntegerState if they fit your needs, e.g., you require only a
76 //   number tracking or bit-encoding.
77 // - Implement all pure methods. Also use overloading if the attribute is not
78 //   conforming with the "default" behavior: A (set of) LLVM-IR attribute(s) for
79 //   an argument, call site argument, function return value, or function. See
80 //   the class and method descriptions for more information on the two
81 //   "Abstract" classes and their respective methods.
82 // - Register opportunities for the new abstract attribute in the
83 //   `Attributor::identifyDefaultAbstractAttributes` method if it should be
84 //   counted as a 'default' attribute.
85 // - Add sufficient tests.
86 // - Add a Statistics object for bookkeeping. If it is a simple (set of)
87 //   attribute(s) manifested through the Attributor manifestation framework, see
88 //   the bookkeeping function in Attributor.cpp.
89 // - If instructions with a certain opcode are interesting to the attribute, add
90 //   that opcode to the switch in `Attributor::identifyAbstractAttributes`. This
91 //   will make it possible to query all those instructions through the
92 //   `InformationCache::getOpcodeInstMapForFunction` interface and eliminate the
93 //   need to traverse the IR repeatedly.
94 //
95 //===----------------------------------------------------------------------===//
96 
97 #ifndef LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
98 #define LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
99 
100 #include "llvm/ADT/DenseSet.h"
101 #include "llvm/ADT/GraphTraits.h"
102 #include "llvm/ADT/MapVector.h"
103 #include "llvm/ADT/STLExtras.h"
104 #include "llvm/ADT/SetOperations.h"
105 #include "llvm/ADT/SetVector.h"
106 #include "llvm/ADT/iterator.h"
107 #include "llvm/Analysis/AssumeBundleQueries.h"
108 #include "llvm/Analysis/CFG.h"
109 #include "llvm/Analysis/CGSCCPassManager.h"
110 #include "llvm/Analysis/LazyCallGraph.h"
111 #include "llvm/Analysis/LoopInfo.h"
112 #include "llvm/Analysis/MemoryLocation.h"
113 #include "llvm/Analysis/MustExecute.h"
114 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
115 #include "llvm/Analysis/PostDominators.h"
116 #include "llvm/Analysis/TargetLibraryInfo.h"
117 #include "llvm/IR/AbstractCallSite.h"
118 #include "llvm/IR/Attributes.h"
119 #include "llvm/IR/ConstantRange.h"
120 #include "llvm/IR/Constants.h"
121 #include "llvm/IR/InstIterator.h"
122 #include "llvm/IR/Instruction.h"
123 #include "llvm/IR/Instructions.h"
124 #include "llvm/IR/PassManager.h"
125 #include "llvm/IR/Value.h"
126 #include "llvm/Support/Alignment.h"
127 #include "llvm/Support/Allocator.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/DOTGraphTraits.h"
130 #include "llvm/Support/ErrorHandling.h"
131 #include "llvm/Support/ModRef.h"
132 #include "llvm/Support/TimeProfiler.h"
133 #include "llvm/TargetParser/Triple.h"
134 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
135 
136 #include <limits>
137 #include <map>
138 #include <optional>
139 
140 namespace llvm {
141 
142 class DataLayout;
143 class LLVMContext;
144 class Pass;
145 template <typename Fn> class function_ref;
146 struct AADepGraphNode;
147 struct AADepGraph;
148 struct Attributor;
149 struct AbstractAttribute;
150 struct InformationCache;
151 struct AAIsDead;
152 struct AttributorCallGraph;
153 struct IRPosition;
154 
155 class Function;
156 
157 /// Abstract Attribute helper functions.
158 namespace AA {
159 using InstExclusionSetTy = SmallPtrSet<Instruction *, 4>;
160 
// Named GPU address spaces used for AS reasoning.
// NOTE(review): the numeric values appear to follow the NVPTX address space
// numbering (shared = 3, constant = 4, local = 5) — confirm before relying on
// the specific values for another target.
enum class GPUAddressSpace : unsigned {
  Generic = 0,
  Global = 1,
  Shared = 3,
  Constant = 4,
  Local = 5,
};
168 
/// Return true iff \p M targets a GPU (and we can use GPU AS reasoning).
170 bool isGPU(const Module &M);
171 
/// Flags to distinguish intra-procedural queries from *potentially*
/// inter-procedural queries. Note that information can be valid for both and
/// therefore both bits might be set.
175 enum ValueScope : uint8_t {
176   Intraprocedural = 1,
177   Interprocedural = 2,
178   AnyScope = Intraprocedural | Interprocedural,
179 };
180 
181 struct ValueAndContext : public std::pair<Value *, const Instruction *> {
182   using Base = std::pair<Value *, const Instruction *>;
183   ValueAndContext(const Base &B) : Base(B) {}
184   ValueAndContext(Value &V, const Instruction *CtxI) : Base(&V, CtxI) {}
185   ValueAndContext(Value &V, const Instruction &CtxI) : Base(&V, &CtxI) {}
186 
187   Value *getValue() const { return this->first; }
188   const Instruction *getCtxI() const { return this->second; }
189 };
190 
191 /// Return true if \p I is a `nosync` instruction. Use generic reasoning and
192 /// potentially the corresponding AANoSync.
193 bool isNoSyncInst(Attributor &A, const Instruction &I,
194                   const AbstractAttribute &QueryingAA);
195 
196 /// Return true if \p V is dynamically unique, that is, there are no two
197 /// "instances" of \p V at runtime with different values.
198 /// Note: If \p ForAnalysisOnly is set we only check that the Attributor will
199 /// never use \p V to represent two "instances" not that \p V could not
200 /// technically represent them.
201 bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
202                          const Value &V, bool ForAnalysisOnly = true);
203 
204 /// Return true if \p V is a valid value in \p Scope, that is a constant or an
205 /// instruction/argument of \p Scope.
206 bool isValidInScope(const Value &V, const Function *Scope);
207 
208 /// Return true if the value of \p VAC is a valid at the position of \p VAC,
209 /// that is a constant, an argument of the same function, or an instruction in
210 /// that function that dominates the position.
211 bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache);
212 
213 /// Try to convert \p V to type \p Ty without introducing new instructions. If
214 /// this is not possible return `nullptr`. Note: this function basically knows
215 /// how to cast various constants.
216 Value *getWithType(Value &V, Type &Ty);
217 
218 /// Return the combination of \p A and \p B such that the result is a possible
219 /// value of both. \p B is potentially casted to match the type \p Ty or the
220 /// type of \p A if \p Ty is null.
221 ///
222 /// Examples:
223 ///        X + none  => X
224 /// not_none + undef => not_none
225 ///          V1 + V2 => nullptr
226 std::optional<Value *>
227 combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
228                                      const std::optional<Value *> &B, Type *Ty);
229 
/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct RangeTy {
  int64_t Offset = Unassigned;
  int64_t Size = Unassigned;

  RangeTy(int64_t Offset, int64_t Size) : Offset(Offset), Size(Size) {}
  RangeTy() = default;

  /// Return the range with both offset and size unknown.
  static RangeTy getUnknown() { return RangeTy{Unknown, Unknown}; }

  /// Return true if offset or size are unknown.
  bool offsetOrSizeAreUnknown() const {
    return Offset == RangeTy::Unknown || Size == RangeTy::Unknown;
  }

  /// Return true if offset and size are unknown, thus this is the default
  /// unknown object.
  bool offsetAndSizeAreUnknown() const {
    return Offset == RangeTy::Unknown && Size == RangeTy::Unknown;
  }

  /// Return true if the offset and size are unassigned.
  bool isUnassigned() const {
    assert((Offset == RangeTy::Unassigned) == (Size == RangeTy::Unassigned) &&
           "Inconsistent state!");
    return Offset == RangeTy::Unassigned;
  }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p Range.
  bool mayOverlap(const RangeTy &Range) const {
    // Any unknown value and we are giving up -> overlap.
    if (offsetOrSizeAreUnknown() || Range.offsetOrSizeAreUnknown())
      return true;

    // Check whether the half-open intervals [Offset, Offset + Size) and
    // [Range.Offset, Range.Offset + Range.Size) intersect.
    return Range.Offset + Range.Size > Offset && Range.Offset < Offset + Size;
  }

  /// Merge \p R into this range such that the result covers both. Unassigned
  /// is the neutral element, Unknown is absorbing per component.
  RangeTy &operator&=(const RangeTy &R) {
    if (R.isUnassigned())
      return *this;
    if (isUnassigned())
      return *this = R;
    // Propagate Unknown component-wise.
    if (Offset == Unknown || R.Offset == Unknown)
      Offset = Unknown;
    if (Size == Unknown || R.Size == Unknown)
      Size = Unknown;
    if (offsetAndSizeAreUnknown())
      return *this;
    if (Offset == Unknown) {
      // Offset is unknown; conservatively keep the larger size.
      Size = std::max(Size, R.Size);
    } else if (Size == Unknown) {
      // Size is unknown; conservatively keep the smaller offset.
      Offset = std::min(Offset, R.Offset);
    } else {
      // Both components known: take the convex hull of the two ranges.
      // The end point must be computed from the *old* offsets before Offset
      // is overwritten; using the updated offset would shrink the range.
      int64_t NewOffset = std::min(Offset, R.Offset);
      Size = std::max(Offset + Size, R.Offset + R.Size) - NewOffset;
      Offset = NewOffset;
    }
    return *this;
  }

  /// Comparison for sorting ranges by offset.
  ///
  /// Returns true if the offset \p L is less than that of \p R.
  inline static bool OffsetLessThan(const RangeTy &L, const RangeTy &R) {
    return L.Offset < R.Offset;
  }

  /// Constants used to represent special offsets or sizes.
  /// - We cannot assume that Offsets and Size are non-negative.
  /// - The constants should not clash with DenseMapInfo, such as EmptyKey
  ///   (INT64_MAX) and TombstoneKey (INT64_MIN).
  /// We use values "in the middle" of the 64 bit range to represent these
  /// special cases.
  static constexpr int64_t Unassigned = std::numeric_limits<int32_t>::min();
  static constexpr int64_t Unknown = std::numeric_limits<int32_t>::max();
};
308 
309 inline raw_ostream &operator<<(raw_ostream &OS, const RangeTy &R) {
310   OS << "[" << R.Offset << ", " << R.Size << "]";
311   return OS;
312 }
313 
314 inline bool operator==(const RangeTy &A, const RangeTy &B) {
315   return A.Offset == B.Offset && A.Size == B.Size;
316 }
317 
318 inline bool operator!=(const RangeTy &A, const RangeTy &B) { return !(A == B); }
319 
320 /// Return the initial value of \p Obj with type \p Ty if that is a constant.
321 Constant *getInitialValueForObj(Attributor &A, Value &Obj, Type &Ty,
322                                 const TargetLibraryInfo *TLI,
323                                 const DataLayout &DL,
324                                 RangeTy *RangePtr = nullptr);
325 
326 /// Collect all potential values \p LI could read into \p PotentialValues. That
327 /// is, the only values read by \p LI are assumed to be known and all are in
328 /// \p PotentialValues. \p PotentialValueOrigins will contain all the
329 /// instructions that might have put a potential value into \p PotentialValues.
330 /// Dependences onto \p QueryingAA are properly tracked, \p
331 /// UsedAssumedInformation will inform the caller if assumed information was
332 /// used.
333 ///
334 /// \returns True if the assumed potential copies are all in \p PotentialValues,
335 ///          false if something went wrong and the copies could not be
336 ///          determined.
337 bool getPotentiallyLoadedValues(
338     Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
339     SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
340     const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
341     bool OnlyExact = false);
342 
343 /// Collect all potential values of the one stored by \p SI into
344 /// \p PotentialCopies. That is, the only copies that were made via the
345 /// store are assumed to be known and all are in \p PotentialCopies. Dependences
346 /// onto \p QueryingAA are properly tracked, \p UsedAssumedInformation will
347 /// inform the caller if assumed information was used.
348 ///
349 /// \returns True if the assumed potential copies are all in \p PotentialCopies,
350 ///          false if something went wrong and the copies could not be
351 ///          determined.
352 bool getPotentialCopiesOfStoredValue(
353     Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
354     const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
355     bool OnlyExact = false);
356 
357 /// Return true if \p IRP is readonly. This will query respective AAs that
358 /// deduce the information and introduce dependences for \p QueryingAA.
359 bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
360                        const AbstractAttribute &QueryingAA, bool &IsKnown);
361 
362 /// Return true if \p IRP is readnone. This will query respective AAs that
363 /// deduce the information and introduce dependences for \p QueryingAA.
364 bool isAssumedReadNone(Attributor &A, const IRPosition &IRP,
365                        const AbstractAttribute &QueryingAA, bool &IsKnown);
366 
367 /// Return true if \p ToI is potentially reachable from \p FromI without running
368 /// into any instruction in \p ExclusionSet The two instructions do not need to
369 /// be in the same function. \p GoBackwardsCB can be provided to convey domain
370 /// knowledge about the "lifespan" the user is interested in. By default, the
371 /// callers of \p FromI are checked as well to determine if \p ToI can be
372 /// reached. If the query is not interested in callers beyond a certain point,
373 /// e.g., a GPU kernel entry or the function containing an alloca, the
374 /// \p GoBackwardsCB should return false.
375 bool isPotentiallyReachable(
376     Attributor &A, const Instruction &FromI, const Instruction &ToI,
377     const AbstractAttribute &QueryingAA,
378     const AA::InstExclusionSetTy *ExclusionSet = nullptr,
379     std::function<bool(const Function &F)> GoBackwardsCB = nullptr);
380 
381 /// Same as above but it is sufficient to reach any instruction in \p ToFn.
382 bool isPotentiallyReachable(
383     Attributor &A, const Instruction &FromI, const Function &ToFn,
384     const AbstractAttribute &QueryingAA,
385     const AA::InstExclusionSetTy *ExclusionSet = nullptr,
386     std::function<bool(const Function &F)> GoBackwardsCB = nullptr);
387 
388 /// Return true if \p Obj is assumed to be a thread local object.
389 bool isAssumedThreadLocalObject(Attributor &A, Value &Obj,
390                                 const AbstractAttribute &QueryingAA);
391 
392 /// Return true if \p I is potentially affected by a barrier.
393 bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
394                                     const AbstractAttribute &QueryingAA);
395 bool isPotentiallyAffectedByBarrier(Attributor &A, ArrayRef<const Value *> Ptrs,
396                                     const AbstractAttribute &QueryingAA,
397                                     const Instruction *CtxI);
398 } // namespace AA
399 
/// Allow AA::ValueAndContext to be used as a DenseMap key by forwarding all
/// trait functions to the DenseMapInfo of the underlying std::pair.
template <>
struct DenseMapInfo<AA::ValueAndContext>
    : public DenseMapInfo<AA::ValueAndContext::Base> {
  using Base = DenseMapInfo<AA::ValueAndContext::Base>;
  static inline AA::ValueAndContext getEmptyKey() {
    return Base::getEmptyKey();
  }
  static inline AA::ValueAndContext getTombstoneKey() {
    return Base::getTombstoneKey();
  }
  static unsigned getHashValue(const AA::ValueAndContext &VAC) {
    return Base::getHashValue(VAC);
  }

  static bool isEqual(const AA::ValueAndContext &LHS,
                      const AA::ValueAndContext &RHS) {
    return Base::isEqual(LHS, RHS);
  }
};
419 
/// Allow the AA::ValueScope bit-flag enum to be used as a DenseMap key by
/// treating it like its underlying integer type.
template <>
struct DenseMapInfo<AA::ValueScope> : public DenseMapInfo<unsigned char> {
  using Base = DenseMapInfo<unsigned char>;
  static inline AA::ValueScope getEmptyKey() {
    return AA::ValueScope(Base::getEmptyKey());
  }
  static inline AA::ValueScope getTombstoneKey() {
    return AA::ValueScope(Base::getTombstoneKey());
  }
  static unsigned getHashValue(const AA::ValueScope &S) {
    return Base::getHashValue(S);
  }

  static bool isEqual(const AA::ValueScope &LHS, const AA::ValueScope &RHS) {
    return Base::isEqual(LHS, RHS);
  }
};
437 
/// DenseMap traits for instruction exclusion sets: hashing and equality are
/// based on the *contents* of the pointed-to set, not on pointer identity.
template <>
struct DenseMapInfo<const AA::InstExclusionSetTy *>
    : public DenseMapInfo<void *> {
  using super = DenseMapInfo<void *>;
  static inline const AA::InstExclusionSetTy *getEmptyKey() {
    return static_cast<const AA::InstExclusionSetTy *>(super::getEmptyKey());
  }
  static inline const AA::InstExclusionSetTy *getTombstoneKey() {
    return static_cast<const AA::InstExclusionSetTy *>(
        super::getTombstoneKey());
  }
  static unsigned getHashValue(const AA::InstExclusionSetTy *BES) {
    // Commutative sum so the hash does not depend on iteration order; a null
    // set hashes like an empty one.
    unsigned H = 0;
    if (BES)
      for (const auto *II : *BES)
        H += DenseMapInfo<const Instruction *>::getHashValue(II);
    return H;
  }
  static bool isEqual(const AA::InstExclusionSetTy *LHS,
                      const AA::InstExclusionSetTy *RHS) {
    if (LHS == RHS)
      return true;
    // The sentinel keys only compare equal to themselves (handled above).
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return false;
    // Null is treated like an empty set.
    auto SizeLHS = LHS ? LHS->size() : 0;
    auto SizeRHS = RHS ? RHS->size() : 0;
    if (SizeLHS != SizeRHS)
      return false;
    if (SizeRHS == 0)
      return true;
    // Equal sizes, hence subset implies set equality.
    return llvm::set_is_subset(*LHS, *RHS);
  }
};
472 
473 /// The value passed to the line option that defines the maximal initialization
474 /// chain length.
475 extern unsigned MaxInitializationChainLength;
476 
477 ///{
/// Result of an update or manifest step: did anything change?
enum class ChangeStatus {
  CHANGED,   ///< The abstract state (or IR) was modified.
  UNCHANGED, ///< Nothing was modified.
};
482 
483 ChangeStatus operator|(ChangeStatus l, ChangeStatus r);
484 ChangeStatus &operator|=(ChangeStatus &l, ChangeStatus r);
485 ChangeStatus operator&(ChangeStatus l, ChangeStatus r);
486 ChangeStatus &operator&=(ChangeStatus &l, ChangeStatus r);
487 
488 enum class DepClassTy {
489   REQUIRED, ///< The target cannot be valid if the source is not.
490   OPTIONAL, ///< The target may be valid if the source is not.
491   NONE,     ///< Do not track a dependence between source and target.
492 };
493 ///}
494 
/// The data structure for the nodes of a dependency graph
struct AADepGraphNode {
public:
  virtual ~AADepGraphNode() = default;
  /// A dependence is a pointer to another node plus one bit that encodes
  /// whether the dependence is optional (see DepClassTy).
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using DepSetTy = SmallSetVector<DepTy, 2>;

protected:
  /// Set of dependency graph nodes which should be updated if this one
  /// is updated. The bit encodes if it is optional.
  DepSetTy Deps;

  /// Extract the node pointer from a dependence entry.
  static AADepGraphNode *DepGetVal(const DepTy &DT) { return DT.getPointer(); }
  /// Extract the node pointer from a dependence entry, cast to an
  /// AbstractAttribute (asserts if it is not one).
  static AbstractAttribute *DepGetValAA(const DepTy &DT) {
    return cast<AbstractAttribute>(DT.getPointer());
  }

  operator AbstractAttribute *() { return cast<AbstractAttribute>(this); }

public:
  /// Iterators over the dependences, exposed either as graph nodes
  /// (child_begin/child_end) or as abstract attributes (begin/end).
  using iterator = mapped_iterator<DepSetTy::iterator, decltype(&DepGetVal)>;
  using aaiterator =
      mapped_iterator<DepSetTy::iterator, decltype(&DepGetValAA)>;

  aaiterator begin() { return aaiterator(Deps.begin(), &DepGetValAA); }
  aaiterator end() { return aaiterator(Deps.end(), &DepGetValAA); }
  iterator child_begin() { return iterator(Deps.begin(), &DepGetVal); }
  iterator child_end() { return iterator(Deps.end(), &DepGetVal); }

  /// Print this node, without an Attributor by default.
  void print(raw_ostream &OS) const { print(nullptr, OS); }
  virtual void print(Attributor *, raw_ostream &OS) const {
    OS << "AADepNode Impl\n";
  }
  DepSetTy &getDeps() { return Deps; }

  friend struct Attributor;
  friend struct AADepGraph;
};
533 
/// The data structure for the dependency graph
///
/// Note that in this graph if there is an edge from A to B (A -> B),
/// then it means that B depends on A, and when the state of A is
/// updated, node B should also be updated
struct AADepGraph {
  AADepGraph() = default;
  ~AADepGraph() = default;

  using DepTy = AADepGraphNode::DepTy;
  static AADepGraphNode *DepGetVal(const DepTy &DT) { return DT.getPointer(); }
  using iterator =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;

  /// There is no root node for the dependency graph. But the SCCIterator
  /// requires a single entry point, so we maintain a fake("synthetic") root
  /// node that depends on every node.
  AADepGraphNode SyntheticRoot;
  AADepGraphNode *GetEntryNode() { return &SyntheticRoot; }

  /// Iterate over the children of the synthetic root, i.e., all real nodes.
  iterator begin() { return SyntheticRoot.child_begin(); }
  iterator end() { return SyntheticRoot.child_end(); }

  /// Open the graph in a graph viewer (debugging aid).
  void viewGraph();

  /// Dump graph to file
  void dumpGraph();

  /// Print dependency graph
  void print();
};
565 
566 /// Helper to describe and deal with positions in the LLVM-IR.
567 ///
568 /// A position in the IR is described by an anchor value and an "offset" that
569 /// could be the argument number, for call sites and arguments, or an indicator
570 /// of the "position kind". The kinds, specified in the Kind enum below, include
571 /// the locations in the attribute list, i.a., function scope and return value,
572 /// as well as a distinction between call sites and functions. Finally, there
573 /// are floating values that do not have a corresponding attribute list
574 /// position.
575 struct IRPosition {
576   // NOTE: In the future this definition can be changed to support recursive
577   // functions.
578   using CallBaseContext = CallBase;
579 
580   /// The positions we distinguish in the IR.
581   enum Kind : char {
582     IRP_INVALID,  ///< An invalid position.
583     IRP_FLOAT,    ///< A position that is not associated with a spot suitable
584                   ///< for attributes. This could be any value or instruction.
585     IRP_RETURNED, ///< An attribute for the function return value.
586     IRP_CALL_SITE_RETURNED, ///< An attribute for a call site return value.
587     IRP_FUNCTION,           ///< An attribute for a function (scope).
588     IRP_CALL_SITE,          ///< An attribute for a call site (function scope).
589     IRP_ARGUMENT,           ///< An attribute for a function argument.
590     IRP_CALL_SITE_ARGUMENT, ///< An attribute for a call site argument.
591   };
592 
  /// Default constructor available to create invalid positions implicitly. All
  /// other positions need to be created explicitly through the appropriate
  /// static member function.
  /// Internally, the invalid position is encoded as a null anchor value.
  IRPosition() : Enc(nullptr, ENC_VALUE) { verify(); }
597 
  /// Create a position describing the value of \p V.
  /// Arguments and call bases are mapped to their dedicated position kinds;
  /// every other value becomes a floating position.
  static const IRPosition value(const Value &V,
                                const CallBaseContext *CBContext = nullptr) {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return IRPosition::argument(*Arg, CBContext);
    // Note: \p CBContext is not forwarded for call base values.
    if (auto *CB = dyn_cast<CallBase>(&V))
      return IRPosition::callsite_returned(*CB);
    return IRPosition(const_cast<Value &>(V), IRP_FLOAT, CBContext);
  }
607 
  /// Create a position describing the instruction \p I. This is different from
  /// the value version because call sites are treated as instructions rather
  /// than their return value in this function.
  static const IRPosition inst(const Instruction &I,
                               const CallBaseContext *CBContext = nullptr) {
    return IRPosition(const_cast<Instruction &>(I), IRP_FLOAT, CBContext);
  }
615 
  /// Create a position describing the function scope of \p F.
  /// \p CBContext is used for call base specific analysis.
  static const IRPosition function(const Function &F,
                                   const CallBaseContext *CBContext = nullptr) {
    // The const_cast only adjusts the stored pointer type; \p F is not
    // modified here.
    return IRPosition(const_cast<Function &>(F), IRP_FUNCTION, CBContext);
  }
622 
  /// Create a position describing the returned value of \p F.
  /// \p CBContext is used for call base specific analysis.
  static const IRPosition returned(const Function &F,
                                   const CallBaseContext *CBContext = nullptr) {
    // Same anchor as the function position, distinguished by IRP_RETURNED.
    return IRPosition(const_cast<Function &>(F), IRP_RETURNED, CBContext);
  }
629 
  /// Create a position describing the argument \p Arg.
  /// \p CBContext is used for call base specific analysis.
  static const IRPosition argument(const Argument &Arg,
                                   const CallBaseContext *CBContext = nullptr) {
    return IRPosition(const_cast<Argument &>(Arg), IRP_ARGUMENT, CBContext);
  }
636 
  /// Create a position describing the function scope of \p CB.
  static const IRPosition callsite_function(const CallBase &CB) {
    // The call base itself anchors the call site (function scope) position.
    return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE);
  }
641 
  /// Create a position describing the returned value of \p CB.
  static const IRPosition callsite_returned(const CallBase &CB) {
    // Same anchor as the call site position, distinguished by the kind.
    return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE_RETURNED);
  }
646 
  /// Create a position describing the argument of \p CB at position \p ArgNo.
  static const IRPosition callsite_argument(const CallBase &CB,
                                            unsigned ArgNo) {
    // Call site arguments are anchored via the Use, not the value, so the
    // exact operand can be pinpointed later.
    return IRPosition(const_cast<Use &>(CB.getArgOperandUse(ArgNo)),
                      IRP_CALL_SITE_ARGUMENT);
  }
653 
654   /// Create a position describing the argument of \p ACS at position \p ArgNo.
655   static const IRPosition callsite_argument(AbstractCallSite ACS,
656                                             unsigned ArgNo) {
657     if (ACS.getNumArgOperands() <= ArgNo)
658       return IRPosition();
659     int CSArgNo = ACS.getCallArgOperandNo(ArgNo);
660     if (CSArgNo >= 0)
661       return IRPosition::callsite_argument(
662           cast<CallBase>(*ACS.getInstruction()), CSArgNo);
663     return IRPosition();
664   }
665 
  /// Create a position with function scope matching the "context" of \p IRP.
  /// If \p IRP is a call site (see isAnyCallSitePosition()) then the result
  /// will be a call site position, otherwise the function position of the
  /// associated function.
  static const IRPosition
  function_scope(const IRPosition &IRP,
                 const CallBaseContext *CBContext = nullptr) {
    if (IRP.isAnyCallSitePosition()) {
      // Note: \p CBContext is not forwarded for call site positions.
      return IRPosition::callsite_function(
          cast<CallBase>(IRP.getAnchorValue()));
    }
    assert(IRP.getAssociatedFunction());
    return IRPosition::function(*IRP.getAssociatedFunction(), CBContext);
  }
680 
  /// Positions are identical if both the encoding (anchor value + kind) and
  /// the call base context match.
  bool operator==(const IRPosition &RHS) const {
    return Enc == RHS.Enc && RHS.CBContext == CBContext;
  }
  bool operator!=(const IRPosition &RHS) const { return !(*this == RHS); }
685 
686   /// Return the value this abstract attribute is anchored with.
687   ///
688   /// The anchor value might not be the associated value if the latter is not
689   /// sufficient to determine where arguments will be manifested. This is, so
690   /// far, only the case for call site arguments as the value is not sufficient
691   /// to pinpoint them. Instead, we can use the call site as an anchor.
692   Value &getAnchorValue() const {
693     switch (getEncodingBits()) {
694     case ENC_VALUE:
695     case ENC_RETURNED_VALUE:
696     case ENC_FLOATING_FUNCTION:
697       return *getAsValuePtr();
698     case ENC_CALL_SITE_ARGUMENT_USE:
699       return *(getAsUsePtr()->getUser());
700     default:
701       llvm_unreachable("Unkown encoding!");
702     };
703   }
704 
  /// Return the associated function, if any. For call sites this is the
  /// (callback) callee if it can be determined, otherwise the anchor scope.
  Function *getAssociatedFunction() const {
    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue())) {
      // We reuse the logic that associates callback callees to arguments of a
      // call site here to identify the callback callee as the associated
      // function.
      if (Argument *Arg = getAssociatedArgument())
        return Arg->getParent();
      // Otherwise the associated function is the called function, if known.
      return dyn_cast_if_present<Function>(
          CB->getCalledOperand()->stripPointerCasts());
    }
    return getAnchorScope();
  }
718 
  /// Return the associated argument, if any. For (callback) call site
  /// arguments this is the matching callee argument (defined out-of-line).
  Argument *getAssociatedArgument() const;
721 
722   /// Return true if the position refers to a function interface, that is the
723   /// function scope, the function return, or an argument.
724   bool isFnInterfaceKind() const {
725     switch (getPositionKind()) {
726     case IRPosition::IRP_FUNCTION:
727     case IRPosition::IRP_RETURNED:
728     case IRPosition::IRP_ARGUMENT:
729       return true;
730     default:
731       return false;
732     }
733   }
734 
735   /// Return true if this is a function or call site position.
736   bool isFunctionScope() const {
737     switch (getPositionKind()) {
738     case IRPosition::IRP_CALL_SITE:
739     case IRPosition::IRP_FUNCTION:
740       return true;
741     default:
742       return false;
743     };
744   }
745 
746   /// Return the Function surrounding the anchor value.
747   Function *getAnchorScope() const {
748     Value &V = getAnchorValue();
749     if (isa<Function>(V))
750       return &cast<Function>(V);
751     if (isa<Argument>(V))
752       return cast<Argument>(V).getParent();
753     if (isa<Instruction>(V))
754       return cast<Instruction>(V).getFunction();
755     return nullptr;
756   }
757 
758   /// Return the context instruction, if any.
759   Instruction *getCtxI() const {
760     Value &V = getAnchorValue();
761     if (auto *I = dyn_cast<Instruction>(&V))
762       return I;
763     if (auto *Arg = dyn_cast<Argument>(&V))
764       if (!Arg->getParent()->isDeclaration())
765         return &Arg->getParent()->getEntryBlock().front();
766     if (auto *F = dyn_cast<Function>(&V))
767       if (!F->isDeclaration())
768         return &(F->getEntryBlock().front());
769     return nullptr;
770   }
771 
772   /// Return the value this abstract attribute is associated with.
773   Value &getAssociatedValue() const {
774     if (getCallSiteArgNo() < 0 || isa<Argument>(&getAnchorValue()))
775       return getAnchorValue();
776     assert(isa<CallBase>(&getAnchorValue()) && "Expected a call base!");
777     return *cast<CallBase>(&getAnchorValue())
778                 ->getArgOperand(getCallSiteArgNo());
779   }
780 
781   /// Return the type this abstract attribute is associated with.
782   Type *getAssociatedType() const {
783     if (getPositionKind() == IRPosition::IRP_RETURNED)
784       return getAssociatedFunction()->getReturnType();
785     return getAssociatedValue().getType();
786   }
787 
  /// Return the callee argument number of the associated value if it is an
  /// argument or call site argument, otherwise a negative value. In contrast to
  /// `getCallSiteArgNo` this method will always return the "argument number"
  /// from the perspective of the callee. This may not be the same as the call
  /// site if this is a callback call.
  int getCalleeArgNo() const {
    return getArgNo(/* CallbackCalleeArgIfApplicable */ true);
  }

  /// Return the call site argument number of the associated value if it is an
  /// argument or call site argument, otherwise a negative value. In contrast to
  /// `getCalleeArgNo` this method will always return the "operand number" from
  /// the perspective of the call site. This may not be the same as the callee
  /// perspective if this is a callback call.
  int getCallSiteArgNo() const {
    return getArgNo(/* CallbackCalleeArgIfApplicable */ false);
  }
805 
  /// Return the index in the attribute list for this position.
  unsigned getAttrIdx() const {
    switch (getPositionKind()) {
    case IRPosition::IRP_INVALID:
    case IRPosition::IRP_FLOAT:
      // Floating and invalid positions have no attribute index; fall through
      // to the unreachable below.
      break;
    case IRPosition::IRP_FUNCTION:
    case IRPosition::IRP_CALL_SITE:
      return AttributeList::FunctionIndex;
    case IRPosition::IRP_RETURNED:
    case IRPosition::IRP_CALL_SITE_RETURNED:
      return AttributeList::ReturnIndex;
    case IRPosition::IRP_ARGUMENT:
      return getCalleeArgNo() + AttributeList::FirstArgIndex;
    case IRPosition::IRP_CALL_SITE_ARGUMENT:
      // Call site attributes are indexed by the call site operand number.
      return getCallSiteArgNo() + AttributeList::FirstArgIndex;
    }
    llvm_unreachable(
        "There is no attribute index for a floating or invalid position!");
  }
826 
827   /// Return the value attributes are attached to.
828   Value *getAttrListAnchor() const {
829     if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
830       return CB;
831     return getAssociatedFunction();
832   }
833 
834   /// Return the attributes associated with this function or call site scope.
835   AttributeList getAttrList() const {
836     if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
837       return CB->getAttributes();
838     return getAssociatedFunction()->getAttributes();
839   }
840 
841   /// Update the attributes associated with this function or call site scope.
842   void setAttrList(const AttributeList &AttrList) const {
843     if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
844       return CB->setAttributes(AttrList);
845     return getAssociatedFunction()->setAttributes(AttrList);
846   }
847 
848   /// Return the number of arguments associated with this function or call site
849   /// scope.
850   unsigned getNumArgs() const {
851     assert((getPositionKind() == IRP_CALL_SITE ||
852             getPositionKind() == IRP_FUNCTION) &&
853            "Only valid for function/call site positions!");
854     if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
855       return CB->arg_size();
856     return getAssociatedFunction()->arg_size();
857   }
858 
  /// Return the argument \p ArgNo associated with this function or call site
  /// scope.
  Value *getArg(unsigned ArgNo) const {
    assert((getPositionKind() == IRP_CALL_SITE ||
            getPositionKind() == IRP_FUNCTION) &&
           "Only valid for function/call site positions!");
    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
      return CB->getArgOperand(ArgNo);
    return getAssociatedFunction()->getArg(ArgNo);
  }
869 
  /// Return the associated position kind, decoded from the encoding bits and
  /// the type of the stored pointer.
  Kind getPositionKind() const {
    char EncodingBits = getEncodingBits();
    // Two encodings determine the kind on their own.
    if (EncodingBits == ENC_CALL_SITE_ARGUMENT_USE)
      return IRP_CALL_SITE_ARGUMENT;
    if (EncodingBits == ENC_FLOATING_FUNCTION)
      return IRP_FLOAT;

    // Otherwise the kind depends on the kind of value that is stored.
    Value *V = getAsValuePtr();
    if (!V)
      return IRP_INVALID;
    if (isa<Argument>(V))
      return IRP_ARGUMENT;
    if (isa<Function>(V))
      return isReturnPosition(EncodingBits) ? IRP_RETURNED : IRP_FUNCTION;
    if (isa<CallBase>(V))
      return isReturnPosition(EncodingBits) ? IRP_CALL_SITE_RETURNED
                                            : IRP_CALL_SITE;
    return IRP_FLOAT;
  }
890 
891   bool isAnyCallSitePosition() const {
892     switch (getPositionKind()) {
893     case IRPosition::IRP_CALL_SITE:
894     case IRPosition::IRP_CALL_SITE_RETURNED:
895     case IRPosition::IRP_CALL_SITE_ARGUMENT:
896       return true;
897     default:
898       return false;
899     }
900   }
901 
902   /// Return true if the position is an argument or call site argument.
903   bool isArgumentPosition() const {
904     switch (getPositionKind()) {
905     case IRPosition::IRP_ARGUMENT:
906     case IRPosition::IRP_CALL_SITE_ARGUMENT:
907       return true;
908     default:
909       return false;
910     }
911   }
912 
913   /// Return the same position without the call base context.
914   IRPosition stripCallBaseContext() const {
915     IRPosition Result = *this;
916     Result.CBContext = nullptr;
917     return Result;
918   }
919 
  /// Get the call base context from the position.
  const CallBaseContext *getCallBaseContext() const { return CBContext; }

  /// Check if the position has any call base context.
  bool hasCallBaseContext() const { return CBContext != nullptr; }

  /// Special DenseMap key values.
  ///
  ///{
  static const IRPosition EmptyKey;
  static const IRPosition TombstoneKey;
  ///}

  /// Conversion into a void * to allow reuse of pointer hashing.
  /// Note: the call base context is not part of the opaque value.
  operator void *() const { return Enc.getOpaqueValue(); }
935 
private:
  /// Private constructor for special values only, e.g., the DenseMap keys
  /// EmptyKey and TombstoneKey!
  explicit IRPosition(void *Ptr, const CallBaseContext *CBContext = nullptr)
      : CBContext(CBContext) {
    Enc.setFromOpaqueValue(Ptr);
  }
942 
  /// IRPosition anchored at \p AnchorVal with kind/argument number \p PK.
  explicit IRPosition(Value &AnchorVal, Kind PK,
                      const CallBaseContext *CBContext = nullptr)
      : CBContext(CBContext) {
    switch (PK) {
    case IRPosition::IRP_INVALID:
      llvm_unreachable("Cannot create invalid IRP with an anchor value!");
      break;
    case IRPosition::IRP_FLOAT:
      // Special case for floating functions.
      if (isa<Function>(AnchorVal) || isa<CallBase>(AnchorVal))
        Enc = {&AnchorVal, ENC_FLOATING_FUNCTION};
      else
        Enc = {&AnchorVal, ENC_VALUE};
      break;
    case IRPosition::IRP_FUNCTION:
    case IRPosition::IRP_CALL_SITE:
      Enc = {&AnchorVal, ENC_VALUE};
      break;
    case IRPosition::IRP_RETURNED:
    case IRPosition::IRP_CALL_SITE_RETURNED:
      // Return positions share the anchor with the function/call site but use
      // a distinct encoding (see getPositionKind()).
      Enc = {&AnchorVal, ENC_RETURNED_VALUE};
      break;
    case IRPosition::IRP_ARGUMENT:
      Enc = {&AnchorVal, ENC_VALUE};
      break;
    case IRPosition::IRP_CALL_SITE_ARGUMENT:
      // Call site arguments are constructed from a Use (see below).
      llvm_unreachable(
          "Cannot create call site argument IRP with an anchor value!");
      break;
    }
    verify();
  }
976 
  /// Return the callee argument number of the associated value if it is an
  /// argument or call site argument. See also `getCalleeArgNo` and
  /// `getCallSiteArgNo`.
  int getArgNo(bool CallbackCalleeArgIfApplicable) const {
    // If requested, prefer the callee perspective; for (callback) call site
    // arguments that is the number of the associated callee argument.
    if (CallbackCalleeArgIfApplicable)
      if (Argument *Arg = getAssociatedArgument())
        return Arg->getArgNo();
    switch (getPositionKind()) {
    case IRPosition::IRP_ARGUMENT:
      return cast<Argument>(getAsValuePtr())->getArgNo();
    case IRPosition::IRP_CALL_SITE_ARGUMENT: {
      // Derive the operand number from the encoded use.
      Use &U = *getAsUsePtr();
      return cast<CallBase>(U.getUser())->getArgOperandNo(&U);
    }
    default:
      return -1;
    }
  }
995 
  /// IRPosition for the use \p U. The position kind \p PK needs to be
  /// IRP_CALL_SITE_ARGUMENT, the anchor value is the user, the associated value
  /// the used value.
  explicit IRPosition(Use &U, Kind PK) {
    assert(PK == IRP_CALL_SITE_ARGUMENT &&
           "Use constructor is for call site arguments only!");
    Enc = {&U, ENC_CALL_SITE_ARGUMENT_USE};
    verify();
  }
1005 
  /// Verify internal invariants (defined out-of-line).
  void verify();

  /// Return the underlying pointer as Value *, valid for all positions but
  /// IRP_CALL_SITE_ARGUMENT.
  Value *getAsValuePtr() const {
    assert(getEncodingBits() != ENC_CALL_SITE_ARGUMENT_USE &&
           "Not a value pointer!");
    return reinterpret_cast<Value *>(Enc.getPointer());
  }
1016 
1017   /// Return the underlying pointer as Use *, valid only for
1018   /// IRP_CALL_SITE_ARGUMENT positions.
1019   Use *getAsUsePtr() const {
1020     assert(getEncodingBits() == ENC_CALL_SITE_ARGUMENT_USE &&
1021            "Not a value pointer!");
1022     return reinterpret_cast<Use *>(Enc.getPointer());
1023   }
1024 
  /// Return true if \p EncodingBits describe a returned or call site returned
  /// position. Both kinds share ENC_RETURNED_VALUE and are distinguished by
  /// the stored value (see getPositionKind()).
  static bool isReturnPosition(char EncodingBits) {
    return EncodingBits == ENC_RETURNED_VALUE;
  }

  /// Return true if the encoding bits describe a returned or call site returned
  /// position.
  bool isReturnPosition() const { return isReturnPosition(getEncodingBits()); }
1034 
  /// The encoding of the IRPosition is a combination of a pointer and two
  /// encoding bits. The values of the encoding bits are defined in the enum
  /// below. The pointer is either a Value* (for the first three encoding bit
  /// combinations) or Use* (for ENC_CALL_SITE_ARGUMENT_USE).
  ///
  ///{
  enum {
    ENC_VALUE = 0b00,
    ENC_RETURNED_VALUE = 0b01,
    ENC_FLOATING_FUNCTION = 0b10,
    ENC_CALL_SITE_ARGUMENT_USE = 0b11,
  };

  // Reserve the maximal amount of bits so there is no need to mask out the
  // remaining ones. We will not encode anything else in the pointer anyway.
  static constexpr int NumEncodingBits =
      PointerLikeTypeTraits<void *>::NumLowBitsAvailable;
  static_assert(NumEncodingBits >= 2, "At least two bits are required!");

  /// The pointer with the encoding bits.
  PointerIntPair<void *, NumEncodingBits, char> Enc;
  ///}

  /// Call base context. Used for callsite specific analysis.
  const CallBaseContext *CBContext = nullptr;

  /// Return the encoding bits.
  char getEncodingBits() const { return Enc.getInt(); }
1063 };
1064 
1065 /// Helper that allows IRPosition as a key in a DenseMap.
/// Helper that allows IRPosition as a key in a DenseMap.
template <> struct DenseMapInfo<IRPosition> {
  static inline IRPosition getEmptyKey() { return IRPosition::EmptyKey; }
  static inline IRPosition getTombstoneKey() {
    return IRPosition::TombstoneKey;
  }
  /// Combine the hash of the encoded pointer (via the void* conversion, which
  /// excludes the call base context) with the hash of the context pointer.
  static unsigned getHashValue(const IRPosition &IRP) {
    return (DenseMapInfo<void *>::getHashValue(IRP) << 4) ^
           (DenseMapInfo<Value *>::getHashValue(IRP.getCallBaseContext()));
  }

  static bool isEqual(const IRPosition &a, const IRPosition &b) {
    return a == b;
  }
};
1080 
1081 /// A visitor class for IR positions.
1082 ///
1083 /// Given a position P, the SubsumingPositionIterator allows to visit "subsuming
1084 /// positions" wrt. attributes/information. Thus, if a piece of information
1085 /// holds for a subsuming position, it also holds for the position P.
1086 ///
1087 /// The subsuming positions always include the initial position and then,
1088 /// depending on the position kind, additionally the following ones:
1089 /// - for IRP_RETURNED:
1090 ///   - the function (IRP_FUNCTION)
1091 /// - for IRP_ARGUMENT:
1092 ///   - the function (IRP_FUNCTION)
1093 /// - for IRP_CALL_SITE:
1094 ///   - the callee (IRP_FUNCTION), if known
1095 /// - for IRP_CALL_SITE_RETURNED:
1096 ///   - the callee (IRP_RETURNED), if known
1097 ///   - the call site (IRP_FUNCTION)
1098 ///   - the callee (IRP_FUNCTION), if known
1099 /// - for IRP_CALL_SITE_ARGUMENT:
1100 ///   - the argument of the callee (IRP_ARGUMENT), if known
1101 ///   - the callee (IRP_FUNCTION), if known
1102 ///   - the position the call site argument is associated with if it is not
1103 ///     anchored to the call site, e.g., if it is an argument then the argument
1104 ///     (IRP_ARGUMENT)
class SubsumingPositionIterator {
  /// All subsuming positions of the position this iterator was created for,
  /// including that position itself. Populated by the (out-of-line)
  /// constructor.
  SmallVector<IRPosition, 4> IRPositions;
  using iterator = decltype(IRPositions)::iterator;

public:
  SubsumingPositionIterator(const IRPosition &IRP);
  iterator begin() { return IRPositions.begin(); }
  iterator end() { return IRPositions.end(); }
};
1114 
1115 /// Wrapper for FunctionAnalysisManager.
struct AnalysisGetter {
  // The client may be running the old pass manager, in which case, we need to
  // map the requested Analysis to its equivalent wrapper in the old pass
  // manager. The scheme implemented here does not require every Analysis to be
  // updated. Only those new analyses that the client cares about in the old
  // pass manager need to expose a LegacyWrapper type, and that wrapper should
  // support a getResult() method that matches the new Analysis.
  //
  // We need SFINAE to check for the LegacyWrapper, but function templates don't
  // allow partial specialization, which is needed in this case. So instead, we
  // use a constexpr bool to perform the SFINAE, and then use this information
  // inside the function template.
  template <typename, typename = void>
  static constexpr bool HasLegacyWrapper = false;

  /// Return the result of \p Analysis for \p F, or nullptr if it is not
  /// available. If this getter was constructed with \p CachedOnly, or if
  /// \p RequestCachedOnly is set, no new analysis is run and only an already
  /// computed result is returned.
  template <typename Analysis>
  typename Analysis::Result *getAnalysis(const Function &F,
                                         bool RequestCachedOnly = false) {
    if (!LegacyPass && !FAM)
      return nullptr;
    if (FAM) {
      if (CachedOnly || RequestCachedOnly)
        return FAM->getCachedResult<Analysis>(const_cast<Function &>(F));
      return &FAM->getResult<Analysis>(const_cast<Function &>(F));
    }
    // Legacy pass manager path; only usable if the Analysis exposes a
    // LegacyWrapper (see HasLegacyWrapper above).
    if constexpr (HasLegacyWrapper<Analysis>) {
      if (!CachedOnly && !RequestCachedOnly)
        return &LegacyPass
                    ->getAnalysis<typename Analysis::LegacyWrapper>(
                        const_cast<Function &>(F))
                    .getResult();
      if (auto *P =
              LegacyPass
                  ->getAnalysisIfAvailable<typename Analysis::LegacyWrapper>())
        return &P->getResult();
    }
    return nullptr;
  }

  AnalysisGetter(FunctionAnalysisManager &FAM, bool CachedOnly = false)
      : FAM(&FAM), CachedOnly(CachedOnly) {}
  AnalysisGetter(Pass *P, bool CachedOnly = false)
      : LegacyPass(P), CachedOnly(CachedOnly) {}
  AnalysisGetter() = default;

private:
  FunctionAnalysisManager *FAM = nullptr;
  Pass *LegacyPass = nullptr;

  /// If \p CachedOnly is true, no pass is created, just existing results are
  /// used. Also available per request.
  bool CachedOnly = false;
};
1169 
/// Detect Analysis types that expose a LegacyWrapper (see AnalysisGetter).
template <typename Analysis>
constexpr bool AnalysisGetter::HasLegacyWrapper<
    Analysis, std::void_t<typename Analysis::LegacyWrapper>> = true;
1173 
1174 /// Data structure to hold cached (LLVM-IR) information.
1175 ///
1176 /// All attributes are given an InformationCache object at creation time to
1177 /// avoid inspection of the IR by all of them individually. This default
1178 /// InformationCache will hold information required by 'default' attributes,
1179 /// thus the ones deduced when Attributor::identifyDefaultAbstractAttributes(..)
1180 /// is called.
1181 ///
1182 /// If custom abstract attributes, registered manually through
1183 /// Attributor::registerAA(...), need more information, especially if it is not
1184 /// reusable, it is advised to inherit from the InformationCache and cast the
1185 /// instance down in the abstract attributes.
struct InformationCache {
  InformationCache(const Module &M, AnalysisGetter &AG,
                   BumpPtrAllocator &Allocator, SetVector<Function *> *CGSCC,
                   bool UseExplorer = true)
      : CGSCC(CGSCC), DL(M.getDataLayout()), Allocator(Allocator), AG(AG),
        TargetTriple(M.getTargetTriple()) {
    // The explorer lives in the bump allocator; its destructor is invoked
    // manually in ~InformationCache.
    if (UseExplorer)
      Explorer = new (Allocator) MustBeExecutedContextExplorer(
          /* ExploreInterBlock */ true, /* ExploreCFGForward */ true,
          /* ExploreCFGBackward */ true,
          /* LIGetter */
          [&](const Function &F) { return AG.getAnalysis<LoopAnalysis>(F); },
          /* DTGetter */
          [&](const Function &F) {
            return AG.getAnalysis<DominatorTreeAnalysis>(F);
          },
          /* PDTGetter */
          [&](const Function &F) {
            return AG.getAnalysis<PostDominatorTreeAnalysis>(F);
          });
  }

  ~InformationCache() {
    // The FunctionInfo objects are allocated via a BumpPtrAllocator, we call
    // the destructor manually.
    for (auto &It : FuncInfoMap)
      It.getSecond()->~FunctionInfo();
    // Same is true for the instruction exclusions sets.
    using AA::InstExclusionSetTy;
    for (auto *BES : BESets)
      BES->~InstExclusionSetTy();
    if (Explorer)
      Explorer->~MustBeExecutedContextExplorer();
  }

  /// Apply \p CB to all uses of \p F. If \p LookThroughConstantExprUses is
  /// true, constant expression users are not given to \p CB but their uses are
  /// traversed transitively.
  template <typename CBTy>
  static void foreachUse(Function &F, CBTy CB,
                         bool LookThroughConstantExprUses = true) {
    SmallVector<Use *, 8> Worklist(make_pointer_range(F.uses()));

    // Note: the worklist grows while iterating, so indices are used on
    // purpose here.
    for (unsigned Idx = 0; Idx < Worklist.size(); ++Idx) {
      Use &U = *Worklist[Idx];

      // Allow use in constant bitcasts and simply look through them.
      if (LookThroughConstantExprUses && isa<ConstantExpr>(U.getUser())) {
        for (Use &CEU : cast<ConstantExpr>(U.getUser())->uses())
          Worklist.push_back(&CEU);
        continue;
      }

      CB(U);
    }
  }

  /// The CG-SCC the pass is run on, or nullptr if it is a module pass.
  const SetVector<Function *> *const CGSCC = nullptr;

  /// A vector type to hold instructions.
  using InstructionVectorTy = SmallVector<Instruction *, 8>;

  /// A map type from opcodes to instructions with this opcode.
  using OpcodeInstMapTy = DenseMap<unsigned, InstructionVectorTy *>;

  /// Return the map that relates "interesting" opcodes with all instructions
  /// with that opcode in \p F.
  OpcodeInstMapTy &getOpcodeInstMapForFunction(const Function &F) {
    return getFunctionInfo(F).OpcodeInstMap;
  }

  /// Return the instructions in \p F that may read or write memory.
  InstructionVectorTy &getReadOrWriteInstsForFunction(const Function &F) {
    return getFunctionInfo(F).RWInsts;
  }

  /// Return the MustBeExecutedContextExplorer, or nullptr if it was disabled
  /// at construction time.
  MustBeExecutedContextExplorer *getMustBeExecutedContextExplorer() {
    return Explorer;
  }

  /// Return TargetLibraryInfo for function \p F.
  TargetLibraryInfo *getTargetLibraryInfoForFunction(const Function &F) {
    return AG.getAnalysis<TargetLibraryAnalysis>(F);
  }

  /// Return true if \p Arg is involved in a must-tail call, thus the argument
  /// of the caller or callee.
  bool isInvolvedInMustTailCall(const Argument &Arg) {
    FunctionInfo &FI = getFunctionInfo(*Arg.getParent());
    return FI.CalledViaMustTail || FI.ContainsMustTailCall;
  }

  bool isOnlyUsedByAssume(const Instruction &I) const {
    return AssumeOnlyValues.contains(&I);
  }

  /// Return the analysis result from a pass \p AP for function \p F.
  template <typename AP>
  typename AP::Result *getAnalysisResultForFunction(const Function &F,
                                                    bool CachedOnly = false) {
    return AG.getAnalysis<AP>(F, CachedOnly);
  }

  /// Return datalayout used in the module.
  const DataLayout &getDL() { return DL; }

  /// Return the map containing all the knowledge we have from `llvm.assume`s.
  const RetainedKnowledgeMap &getKnowledgeMap() const { return KnowledgeMap; }

  /// Given \p BES, return a uniqued version.
  const AA::InstExclusionSetTy *
  getOrCreateUniqueBlockExecutionSet(const AA::InstExclusionSetTy *BES) {
    auto It = BESets.find(BES);
    if (It != BESets.end())
      return *It;
    auto *UniqueBES = new (Allocator) AA::InstExclusionSetTy(*BES);
    bool Success = BESets.insert(UniqueBES).second;
    (void)Success;
    assert(Success && "Expected only new entries to be added");
    return UniqueBES;
  }

  /// Return true if the stack (llvm::Alloca) can be accessed by other threads.
  bool stackIsAccessibleByOtherThreads() { return !targetIsGPU(); }

  /// Return true if the target is a GPU.
  bool targetIsGPU() {
    return TargetTriple.isAMDGPU() || TargetTriple.isNVPTX();
  }

private:
  struct FunctionInfo {
    ~FunctionInfo();

    /// A nested map that remembers all instructions in a function with a
    /// certain instruction opcode (Instruction::getOpcode()).
    OpcodeInstMapTy OpcodeInstMap;

    /// A map from functions to their instructions that may read or write
    /// memory.
    InstructionVectorTy RWInsts;

    /// Function is called by a `musttail` call.
    bool CalledViaMustTail;

    /// Function contains a `musttail` call.
    bool ContainsMustTailCall;
  };

  /// A map type from functions to information about it.
  DenseMap<const Function *, FunctionInfo *> FuncInfoMap;

  /// Return information about the function \p F, potentially by creating it.
  FunctionInfo &getFunctionInfo(const Function &F) {
    FunctionInfo *&FI = FuncInfoMap[&F];
    if (!FI) {
      FI = new (Allocator) FunctionInfo();
      initializeInformationCache(F, *FI);
    }
    return *FI;
  }

  /// Initialize the function information cache \p FI for the function \p F.
  ///
  /// This method needs to be called for all functions that might be looked at
  /// through the information cache interface *prior* to looking at them.
  void initializeInformationCache(const Function &F, FunctionInfo &FI);

  /// The datalayout used in the module.
  const DataLayout &DL;

  /// The allocator used to allocate memory, e.g. for `FunctionInfo`s.
  BumpPtrAllocator &Allocator;

  /// MustBeExecutedContextExplorer, or nullptr if disabled at construction.
  MustBeExecutedContextExplorer *Explorer = nullptr;

  /// A map with knowledge retained in `llvm.assume` instructions.
  RetainedKnowledgeMap KnowledgeMap;

  /// A container for all instructions that are only used by `llvm.assume`.
  SetVector<const Instruction *> AssumeOnlyValues;

  /// Cache for block sets to allow reuse.
  DenseSet<const AA::InstExclusionSetTy *> BESets;

  /// Getters for analysis.
  AnalysisGetter &AG;

  /// Set of inlineable functions
  SmallPtrSet<const Function *, 8> InlineableFunctions;

  /// The triple describing the target machine.
  Triple TargetTriple;

  /// Give the Attributor access to the members so
  /// Attributor::identifyDefaultAbstractAttributes(...) can initialize them.
  friend struct Attributor;
};
1387 
1388 /// Configuration for the Attributor.
struct AttributorConfig {

  AttributorConfig(CallGraphUpdater &CGUpdater) : CGUpdater(CGUpdater) {}

  /// Is the user of the Attributor a module pass or not. This determines what
  /// IR we can look at and modify. If it is a module pass we might deduce facts
  /// outside the initial function set and modify functions outside that set,
  /// but only as part of the optimization of the functions in the initial
  /// function set. For CGSCC passes we can look at the IR of the module slice
  /// but never run any deduction, or perform any modification, outside the
  /// initial function set (which we assume is the SCC).
  bool IsModulePass = true;

  /// Flag to determine if we can delete functions or keep dead ones around.
  bool DeleteFns = true;

  /// Flag to determine if we rewrite function signatures.
  bool RewriteSignatures = true;

  /// Flag to determine if we want to initialize all default AAs for an internal
  /// function marked live. See also: InitializationCallback
  bool DefaultInitializeLiveInternals = true;

  /// Flag to determine if we should skip all liveness checks early on.
  bool UseLiveness = true;

  /// Callback function to be invoked on internal functions marked live.
  std::function<void(Attributor &A, const Function &F)> InitializationCallback =
      nullptr;

  /// Helper to update an underlying call graph and to delete functions.
  CallGraphUpdater &CGUpdater;

  /// If not null, a set limiting the attribute opportunities.
  DenseSet<const char *> *Allowed = nullptr;

  /// Maximum number of iterations to run until fixpoint.
  std::optional<unsigned> MaxFixpointIterations;

  /// A callback function that returns an ORE object from a Function pointer.
  ///{
  using OptimizationRemarkGetter =
      function_ref<OptimizationRemarkEmitter &(Function *)>;
  OptimizationRemarkGetter OREGetter = nullptr;
  ///}

  /// The name of the pass running the attributor, used to emit remarks.
  const char *PassName = nullptr;

  /// Callback to determine if a function is eligible for IPO amendments.
  using IPOAmendableCBTy = function_ref<bool(const Function &F)>;
  IPOAmendableCBTy IPOAmendableCB;
};
1441 
1442 /// The fixpoint analysis framework that orchestrates the attribute deduction.
1443 ///
1444 /// The Attributor provides a general abstract analysis framework (guided
1445 /// fixpoint iteration) as well as helper functions for the deduction of
1446 /// (LLVM-IR) attributes. However, also other code properties can be deduced,
1447 /// propagated, and ultimately manifested through the Attributor framework. This
1448 /// is particularly useful if these properties interact with attributes and a
1449 /// co-scheduled deduction allows to improve the solution. Even if not, thus if
1450 /// attributes/properties are completely isolated, they should use the
1451 /// Attributor framework to reduce the number of fixpoint iteration frameworks
1452 /// in the code base. Note that the Attributor design makes sure that isolated
1453 /// attributes are not impacted, in any way, by others derived at the same time
1454 /// if there is no cross-reasoning performed.
1455 ///
/// The public facing interface of the Attributor is kept simple and basically
/// allows abstract attributes to do one thing: query abstract attributes
/// in-flight. There are two reasons to do this:
1459 ///    a) The optimistic state of one abstract attribute can justify an
///       optimistic state of another, allowing the framework to end up with an
1461 ///       optimistic (=best possible) fixpoint instead of one based solely on
1462 ///       information in the IR.
1463 ///    b) This avoids reimplementing various kinds of lookups, e.g., to check
1464 ///       for existing IR attributes, in favor of a single lookups interface
1465 ///       provided by an abstract attribute subclass.
1466 ///
1467 /// NOTE: The mechanics of adding a new "concrete" abstract attribute are
1468 ///       described in the file comment.
1469 struct Attributor {
1470 
  /// Constructor
  ///
  /// \param Functions The set of functions we are deriving attributes for.
  /// \param InfoCache Cache to hold various information accessible for
  ///                  the abstract attributes.
  /// \param Configuration The Attributor configuration which determines what
  ///                      generic features to use.
  ///
  /// Note that the allocator is shared with the information cache; all
  /// abstract attributes are bump-allocated from it.
  Attributor(SetVector<Function *> &Functions, InformationCache &InfoCache,
             AttributorConfig Configuration)
      : Allocator(InfoCache.Allocator), Functions(Functions),
        InfoCache(InfoCache), Configuration(Configuration) {}
1482 
1483   ~Attributor();
1484 
1485   /// Run the analyses until a fixpoint is reached or enforced (timeout).
1486   ///
  /// The attributes registered with this Attributor can be used afterwards as
  /// long as the Attributor is not destroyed (it owns the attributes now).
1489   ///
1490   /// \Returns CHANGED if the IR was changed, otherwise UNCHANGED.
1491   ChangeStatus run();
1492 
  /// Lookup an abstract attribute of type \p AAType at position \p IRP. While
  /// no abstract attribute is found equivalent positions are checked, see
  /// SubsumingPositionIterator. Thus, the returned abstract attribute
  /// might be anchored at a different position, e.g., the callee if \p IRP is a
  /// call base.
  ///
  /// This method is the only (supported) way an abstract attribute can retrieve
  /// information from another abstract attribute. As an example, take an
  /// abstract attribute that determines the memory access behavior for a
  /// argument (readnone, readonly, ...). It should use `getAAFor` to get the
  /// most optimistic information for other abstract attributes in-flight, e.g.
  /// the one reasoning about the "captured" state for the argument or the one
  /// reasoning on the memory access behavior of the function as a whole.
  ///
  /// If the DepClass enum is set to `DepClassTy::None` the dependence from
  /// \p QueryingAA to the return abstract attribute is not automatically
  /// recorded. This should only be used if the caller will record the
  /// dependence explicitly if necessary, thus if the returned abstract
  /// attribute is used for reasoning. To record the dependences explicitly use
  /// the `Attributor::recordDependence` method.
  template <typename AAType>
  const AAType *getAAFor(const AbstractAttribute &QueryingAA,
                         const IRPosition &IRP, DepClassTy DepClass) {
    return getOrCreateAAFor<AAType>(IRP, &QueryingAA, DepClass,
                                    /* ForceUpdate */ false);
  }
1519 
1520   /// Similar to getAAFor but the return abstract attribute will be updated (via
1521   /// `AbstractAttribute::update`) even if it is found in the cache. This is
1522   /// especially useful for AAIsDead as changes in liveness can make updates
1523   /// possible/useful that were not happening before as the abstract attribute
1524   /// was assumed dead.
1525   template <typename AAType>
1526   const AAType *getAndUpdateAAFor(const AbstractAttribute &QueryingAA,
1527                                   const IRPosition &IRP, DepClassTy DepClass) {
1528     return getOrCreateAAFor<AAType>(IRP, &QueryingAA, DepClass,
1529                                     /* ForceUpdate */ true);
1530   }
1531 
  /// The version of getAAFor that allows to omit a querying abstract
  /// attribute. Using this after Attributor started running is restricted to
  /// only the Attributor itself. Initial seeding of AAs can be done via this
  /// function.
  /// NOTE: ForceUpdate is ignored in any stage other than the update stage.
  template <typename AAType>
  const AAType *getOrCreateAAFor(IRPosition IRP,
                                 const AbstractAttribute *QueryingAA,
                                 DepClassTy DepClass, bool ForceUpdate = false,
                                 bool UpdateAfterInit = true) {
    // Drop the call-base context from the position if it should not be
    // propagated for this query.
    if (!shouldPropagateCallBaseContext(IRP))
      IRP = IRP.stripCallBaseContext();

    // Fast path: reuse an existing AA (even in an invalid state), optionally
    // forcing an update during the update stage.
    if (AAType *AAPtr = lookupAAFor<AAType>(IRP, QueryingAA, DepClass,
                                            /* AllowInvalidState */ true)) {
      if (ForceUpdate && Phase == AttributorPhase::UPDATE)
        updateAA(*AAPtr);
      return AAPtr;
    }

    bool ShouldUpdateAA;
    if (!shouldInitialize<AAType>(IRP, ShouldUpdateAA))
      return nullptr;

    // No matching attribute found, create one.
    // Use the static create method.
    auto &AA = AAType::createForPosition(IRP, *this);

    // Always register a new attribute to make sure we clean up the allocated
    // memory properly.
    registerAA(AA);

    // If we are currently seeding attributes, enforce seeding rules.
    if (Phase == AttributorPhase::SEEDING && !shouldSeedAttribute(AA)) {
      AA.getState().indicatePessimisticFixpoint();
      return &AA;
    }

    // Bootstrap the new attribute with an initial update to propagate
    // information, e.g., function -> call site.
    {
      TimeTraceScope TimeScope("initialize", [&]() {
        return AA.getName() +
               std::to_string(AA.getIRPosition().getPositionKind());
      });
      // Track the nesting depth so recursive initializations are bounded (see
      // the MaxInitializationChainLength check in shouldInitialize).
      ++InitializationChainLength;
      AA.initialize(*this);
      --InitializationChainLength;
    }

    // An AA that must not be updated is fixed pessimistically right away.
    if (!ShouldUpdateAA) {
      AA.getState().indicatePessimisticFixpoint();
      return &AA;
    }

    // Allow seeded attributes to declare dependencies.
    // Remember the seeding state.
    if (UpdateAfterInit) {
      AttributorPhase OldPhase = Phase;
      Phase = AttributorPhase::UPDATE;

      updateAA(AA);

      Phase = OldPhase;
    }

    // Record the dependence of the querying AA (if any) on the new, valid AA.
    if (QueryingAA && AA.getState().isValidState())
      recordDependence(AA, const_cast<AbstractAttribute &>(*QueryingAA),
                       DepClass);
    return &AA;
  }
1603 
1604   template <typename AAType>
1605   const AAType *getOrCreateAAFor(const IRPosition &IRP) {
1606     return getOrCreateAAFor<AAType>(IRP, /* QueryingAA */ nullptr,
1607                                     DepClassTy::NONE);
1608   }
1609 
1610   /// Return the attribute of \p AAType for \p IRP if existing and valid. This
1611   /// also allows non-AA users lookup.
1612   template <typename AAType>
1613   AAType *lookupAAFor(const IRPosition &IRP,
1614                       const AbstractAttribute *QueryingAA = nullptr,
1615                       DepClassTy DepClass = DepClassTy::OPTIONAL,
1616                       bool AllowInvalidState = false) {
1617     static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
1618                   "Cannot query an attribute with a type not derived from "
1619                   "'AbstractAttribute'!");
1620     // Lookup the abstract attribute of type AAType. If found, return it after
1621     // registering a dependence of QueryingAA on the one returned attribute.
1622     AbstractAttribute *AAPtr = AAMap.lookup({&AAType::ID, IRP});
1623     if (!AAPtr)
1624       return nullptr;
1625 
1626     AAType *AA = static_cast<AAType *>(AAPtr);
1627 
1628     // Do not register a dependence on an attribute with an invalid state.
1629     if (DepClass != DepClassTy::NONE && QueryingAA &&
1630         AA->getState().isValidState())
1631       recordDependence(*AA, const_cast<AbstractAttribute &>(*QueryingAA),
1632                        DepClass);
1633 
1634     // Return nullptr if this attribute has an invalid state.
1635     if (!AllowInvalidState && !AA->getState().isValidState())
1636       return nullptr;
1637     return AA;
1638   }
1639 
1640   /// Allows a query AA to request an update if a new query was received.
1641   void registerForUpdate(AbstractAttribute &AA);
1642 
1643   /// Explicitly record a dependence from \p FromAA to \p ToAA, that is if
1644   /// \p FromAA changes \p ToAA should be updated as well.
1645   ///
1646   /// This method should be used in conjunction with the `getAAFor` method and
1647   /// with the DepClass enum passed to the method set to None. This can
1648   /// be beneficial to avoid false dependences but it requires the users of
1649   /// `getAAFor` to explicitly record true dependences through this method.
  /// The \p DepClass flag indicates if the dependence is strictly necessary.
1651   /// That means for required dependences, if \p FromAA changes to an invalid
1652   /// state, \p ToAA can be moved to a pessimistic fixpoint because it required
1653   /// information from \p FromAA but none are available anymore.
1654   void recordDependence(const AbstractAttribute &FromAA,
1655                         const AbstractAttribute &ToAA, DepClassTy DepClass);
1656 
1657   /// Introduce a new abstract attribute into the fixpoint analysis.
1658   ///
1659   /// Note that ownership of the attribute is given to the Attributor. It will
1660   /// invoke delete for the Attributor on destruction of the Attributor.
1661   ///
1662   /// Attributes are identified by their IR position (AAType::getIRPosition())
1663   /// and the address of their static member (see AAType::ID).
1664   template <typename AAType> AAType &registerAA(AAType &AA) {
1665     static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
1666                   "Cannot register an attribute with a type not derived from "
1667                   "'AbstractAttribute'!");
1668     // Put the attribute in the lookup map structure and the container we use to
1669     // keep track of all attributes.
1670     const IRPosition &IRP = AA.getIRPosition();
1671     AbstractAttribute *&AAPtr = AAMap[{&AAType::ID, IRP}];
1672 
1673     assert(!AAPtr && "Attribute already in map!");
1674     AAPtr = &AA;
1675 
1676     // Register AA with the synthetic root only before the manifest stage.
1677     if (Phase == AttributorPhase::SEEDING || Phase == AttributorPhase::UPDATE)
1678       DG.SyntheticRoot.Deps.insert(
1679           AADepGraphNode::DepTy(&AA, unsigned(DepClassTy::REQUIRED)));
1680 
1681     return AA;
1682   }
1683 
1684   /// Return the internal information cache.
1685   InformationCache &getInfoCache() { return InfoCache; }
1686 
1687   /// Return true if this is a module pass, false otherwise.
1688   bool isModulePass() const { return Configuration.IsModulePass; }
1689 
1690   /// Return true if we derive attributes for \p Fn
1691   bool isRunOn(Function &Fn) const { return isRunOn(&Fn); }
1692   bool isRunOn(Function *Fn) const {
1693     return Functions.empty() || Functions.count(Fn);
1694   }
1695 
  /// Return true if an AA of type \p AAType at position \p IRP may take part
  /// in the fixpoint iteration; false means it must be fixed pessimistically.
  template <typename AAType> bool shouldUpdateAA(const IRPosition &IRP) {
    // If this is queried in the manifest stage, we force the AA to indicate
    // pessimistic fixpoint immediately.
    if (Phase == AttributorPhase::MANIFEST || Phase == AttributorPhase::CLEANUP)
      return false;

    Function *AssociatedFn = IRP.getAssociatedFunction();

    // Check if we require a callee but there is none.
    if (!AssociatedFn && AAType::requiresCalleeForCallBase() &&
        IRP.isAnyCallSitePosition())
      return false;

    // Check if we require all callers but we can't see all of them.
    // NOTE(review): AssociatedFn is dereferenced below; presumably function
    // and argument positions always have an associated function -- confirm.
    if (AAType::requiresCallersForArgOrFunction())
      if (IRP.getPositionKind() == IRPosition::IRP_FUNCTION ||
          IRP.getPositionKind() == IRPosition::IRP_ARGUMENT)
        if (!AssociatedFn->hasLocalLinkage())
          return false;

    if (!AAType::isValidIRPositionForUpdate(*this, IRP))
      return false;

    // We update only AAs associated with functions in the Functions set or
    // call sites of them.
    return (!AssociatedFn || isModulePass() || isRunOn(AssociatedFn) ||
            isRunOn(IRP.getAnchorScope()));
  }
1724 
  /// Return true if an AA of type \p AAType should be created for \p IRP at
  /// all. On a true return, \p ShouldUpdateAA is set to indicate whether the
  /// new AA may participate in the fixpoint iteration (see shouldUpdateAA).
  template <typename AAType>
  bool shouldInitialize(const IRPosition &IRP, bool &ShouldUpdateAA) {
    if (!AAType::isValidIRPositionForInit(*this, IRP))
      return false;

    // Respect the configuration's allow-list of AA kinds, if present.
    if (Configuration.Allowed && !Configuration.Allowed->count(&AAType::ID))
      return false;

    // For now we skip anything in naked and optnone functions.
    const Function *AnchorFn = IRP.getAnchorScope();
    if (AnchorFn && (AnchorFn->hasFnAttribute(Attribute::Naked) ||
                     AnchorFn->hasFnAttribute(Attribute::OptimizeNone)))
      return false;

    // Avoid too many nested initializations to prevent a stack overflow.
    if (InitializationChainLength > MaxInitializationChainLength)
      return false;

    ShouldUpdateAA = shouldUpdateAA<AAType>(IRP);

    // AAs with a trivial initializer are only worth creating if they will also
    // be updated.
    return !AAType::hasTrivialInitializer() || ShouldUpdateAA;
  }
1747 
1748   /// Determine opportunities to derive 'default' attributes in \p F and create
1749   /// abstract attribute objects for them.
1750   ///
1751   /// \param F The function that is checked for attribute opportunities.
1752   ///
1753   /// Note that abstract attribute instances are generally created even if the
1754   /// IR already contains the information they would deduce. The most important
1755   /// reason for this is the single interface, the one of the abstract attribute
1756   /// instance, which can be queried without the need to look at the IR in
1757   /// various places.
1758   void identifyDefaultAbstractAttributes(Function &F);
1759 
1760   /// Determine whether the function \p F is IPO amendable
1761   ///
1762   /// If a function is exactly defined or it has alwaysinline attribute
1763   /// and is viable to be inlined, we say it is IPO amendable
1764   bool isFunctionIPOAmendable(const Function &F) {
1765     return F.hasExactDefinition() || InfoCache.InlineableFunctions.count(&F) ||
1766            (Configuration.IPOAmendableCB && Configuration.IPOAmendableCB(F));
1767   }
1768 
  /// Mark the internal function \p F as live.
  ///
  /// This will trigger the identification and initialization of attributes for
  /// \p F.
  void markLiveInternalFunction(const Function &F) {
    assert(F.hasLocalLinkage() &&
           "Only local linkage is assumed dead initially.");

    // Seed the default attribute set for the newly live function if the
    // configuration asks for it.
    if (Configuration.DefaultInitializeLiveInternals)
      identifyDefaultAbstractAttributes(const_cast<Function &>(F));
    // Give the user-provided callback a chance to register additional AAs.
    if (Configuration.InitializationCallback)
      Configuration.InitializationCallback(*this, F);
  }
1782 
1783   /// Helper function to remove callsite.
1784   void removeCallSite(CallInst *CI) {
1785     if (!CI)
1786       return;
1787 
1788     Configuration.CGUpdater.removeCallSite(*CI);
1789   }
1790 
  /// Record that \p U is to be replaced with \p NV after information was
  /// manifested. This also triggers deletion of trivially dead instructions.
  /// \returns true if the replacement was newly registered, false if an
  /// equivalent replacement was already present.
  bool changeUseAfterManifest(Use &U, Value &NV) {
    Value *&V = ToBeChangedUses[&U];
    // An identical (modulo pointer casts) or undef replacement is already
    // registered; nothing to do.
    if (V && (V->stripPointerCasts() == NV.stripPointerCasts() ||
              isa_and_nonnull<UndefValue>(V)))
      return false;
    assert((!V || V == &NV || isa<UndefValue>(NV)) &&
           "Use was registered twice for replacement with different values!");
    V = &NV;
    return true;
  }
1803 
  /// Helper function to replace all uses associated with \p IRP with \p NV.
  /// Return true if there is any change. The flag \p ChangeDroppable indicates
  /// if droppable uses should be changed too.
  bool changeAfterManifest(const IRPosition IRP, Value &NV,
                           bool ChangeDroppable = true) {
    // Call-site arguments have a single use we can replace directly.
    if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT) {
      auto *CB = cast<CallBase>(IRP.getCtxI());
      return changeUseAfterManifest(
          CB->getArgOperandUse(IRP.getCallSiteArgNo()), NV);
    }
    Value &V = IRP.getAssociatedValue();
    auto &Entry = ToBeChangedValues[&V];
    Value *CurNV = get<0>(Entry);
    // An identical (modulo pointer casts) or undef replacement is already
    // registered; nothing to do.
    if (CurNV && (CurNV->stripPointerCasts() == NV.stripPointerCasts() ||
                  isa<UndefValue>(CurNV)))
      return false;
    assert((!CurNV || CurNV == &NV || isa<UndefValue>(NV)) &&
           "Value replacement was registered twice with different values!");
    Entry = {&NV, ChangeDroppable};
    return true;
  }
1825 
  /// Record that \p I is to be replaced with `unreachable` after information
  /// was manifested. The rewrite itself is deferred; this only registers it.
  void changeToUnreachableAfterManifest(Instruction *I) {
    ToBeChangedToUnreachableInsts.insert(I);
  }
1831 
  /// Record that \p II has at least one dead successor block. This information
  /// is used, e.g., to replace \p II with a call, after information was
  /// manifested. The rewrite itself is deferred; this only registers it.
  void registerInvokeWithDeadSuccessor(InvokeInst &II) {
    InvokeWithDeadSuccessor.insert(&II);
  }
1838 
  /// Record that \p I is deleted after information was manifested. This also
  /// triggers deletion of trivially dead instructions.
  void deleteAfterManifest(Instruction &I) { ToBeDeletedInsts.insert(&I); }
1842 
  /// Record that \p BB is deleted after information was manifested. This also
  /// triggers deletion of trivially dead instructions.
  void deleteAfterManifest(BasicBlock &BB) { ToBeDeletedBlocks.insert(&BB); }
1846 
  /// Record that \p BB is added during the manifest of an AA. Added basic
  /// blocks are preserved in the IR.
  void registerManifestAddedBasicBlock(BasicBlock &BB) {
    ManifestAddedBlocks.insert(&BB);
  }
1852 
  /// Record that \p F is deleted after information was manifested. This is a
  /// no-op unless function deletion is enabled in the configuration.
  void deleteAfterManifest(Function &F) {
    if (Configuration.DeleteFns)
      ToBeDeletedFunctions.insert(&F);
  }
1858 
1859   /// Return the attributes of kind \p AK existing in the IR as operand bundles
1860   /// of an llvm.assume.
1861   bool getAttrsFromAssumes(const IRPosition &IRP, Attribute::AttrKind AK,
1862                            SmallVectorImpl<Attribute> &Attrs);
1863 
1864   /// Return true if any kind in \p AKs existing in the IR at a position that
1865   /// will affect this one. See also getAttrs(...).
1866   /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions,
1867   ///                                 e.g., the function position if this is an
1868   ///                                 argument position, should be ignored.
1869   bool hasAttr(const IRPosition &IRP, ArrayRef<Attribute::AttrKind> AKs,
1870                bool IgnoreSubsumingPositions = false,
1871                Attribute::AttrKind ImpliedAttributeKind = Attribute::None);
1872 
1873   /// Return the attributes of any kind in \p AKs existing in the IR at a
1874   /// position that will affect this one. While each position can only have a
1875   /// single attribute of any kind in \p AKs, there are "subsuming" positions
1876   /// that could have an attribute as well. This method returns all attributes
1877   /// found in \p Attrs.
1878   /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions,
1879   ///                                 e.g., the function position if this is an
1880   ///                                 argument position, should be ignored.
1881   void getAttrs(const IRPosition &IRP, ArrayRef<Attribute::AttrKind> AKs,
1882                 SmallVectorImpl<Attribute> &Attrs,
1883                 bool IgnoreSubsumingPositions = false);
1884 
1885   /// Remove all \p AttrKinds attached to \p IRP.
1886   ChangeStatus removeAttrs(const IRPosition &IRP,
1887                            const ArrayRef<Attribute::AttrKind> &AttrKinds);
1888 
1889   /// Attach \p DeducedAttrs to \p IRP, if \p ForceReplace is set we do this
1890   /// even if the same attribute kind was already present.
1891   ChangeStatus manifestAttrs(const IRPosition &IRP,
1892                              const ArrayRef<Attribute> &DeducedAttrs,
1893                              bool ForceReplace = false);
1894 
1895 private:
1896   /// Helper to check \p Attrs for \p AK, if not found, check if \p
1897   /// AAType::isImpliedByIR is true, and if not, create AAType for \p IRP.
1898   template <Attribute::AttrKind AK, typename AAType>
1899   void checkAndQueryIRAttr(const IRPosition &IRP, AttributeSet Attrs);
1900 
1901   /// Helper to apply \p CB on all attributes of type \p AttrDescs of \p IRP.
1902   template <typename DescTy>
1903   ChangeStatus updateAttrMap(const IRPosition &IRP,
1904                              const ArrayRef<DescTy> &AttrDescs,
1905                              function_ref<bool(const DescTy &, AttributeSet,
1906                                                AttributeMask &, AttrBuilder &)>
1907                                  CB);
1908 
1909   /// Mapping from functions/call sites to their attributes.
1910   DenseMap<Value *, AttributeList> AttrsMap;
1911 
1912 public:
1913   /// If \p IRP is assumed to be a constant, return it, if it is unclear yet,
1914   /// return std::nullopt, otherwise return `nullptr`.
1915   std::optional<Constant *> getAssumedConstant(const IRPosition &IRP,
1916                                                const AbstractAttribute &AA,
1917                                                bool &UsedAssumedInformation);
1918   std::optional<Constant *> getAssumedConstant(const Value &V,
1919                                                const AbstractAttribute &AA,
1920                                                bool &UsedAssumedInformation) {
1921     return getAssumedConstant(IRPosition::value(V), AA, UsedAssumedInformation);
1922   }
1923 
1924   /// If \p V is assumed simplified, return it, if it is unclear yet,
1925   /// return std::nullopt, otherwise return `nullptr`.
1926   std::optional<Value *> getAssumedSimplified(const IRPosition &IRP,
1927                                               const AbstractAttribute &AA,
1928                                               bool &UsedAssumedInformation,
1929                                               AA::ValueScope S) {
1930     return getAssumedSimplified(IRP, &AA, UsedAssumedInformation, S);
1931   }
1932   std::optional<Value *> getAssumedSimplified(const Value &V,
1933                                               const AbstractAttribute &AA,
1934                                               bool &UsedAssumedInformation,
1935                                               AA::ValueScope S) {
1936     return getAssumedSimplified(IRPosition::value(V), AA,
1937                                 UsedAssumedInformation, S);
1938   }
1939 
1940   /// If \p V is assumed simplified, return it, if it is unclear yet,
1941   /// return std::nullopt, otherwise return `nullptr`. Same as the public
1942   /// version except that it can be used without recording dependences on any \p
1943   /// AA.
1944   std::optional<Value *> getAssumedSimplified(const IRPosition &V,
1945                                               const AbstractAttribute *AA,
1946                                               bool &UsedAssumedInformation,
1947                                               AA::ValueScope S);
1948 
1949   /// Try to simplify \p IRP and in the scope \p S. If successful, true is
1950   /// returned and all potential values \p IRP can take are put into \p Values.
1951   /// If the result in \p Values contains select or PHI instructions it means
1952   /// those could not be simplified to a single value. Recursive calls with
1953   /// these instructions will yield their respective potential values. If false
1954   /// is returned no other information is valid.
1955   bool getAssumedSimplifiedValues(const IRPosition &IRP,
1956                                   const AbstractAttribute *AA,
1957                                   SmallVectorImpl<AA::ValueAndContext> &Values,
1958                                   AA::ValueScope S,
1959                                   bool &UsedAssumedInformation,
1960                                   bool RecurseForSelectAndPHI = true);
1961 
1962   /// Register \p CB as a simplification callback.
1963   /// `Attributor::getAssumedSimplified` will use these callbacks before
  /// it will ask `AAValueSimplify`. It is important to ensure this
1965   /// is called before `identifyDefaultAbstractAttributes`, assuming the
1966   /// latter is called at all.
1967   using SimplifictionCallbackTy = std::function<std::optional<Value *>(
1968       const IRPosition &, const AbstractAttribute *, bool &)>;
1969   void registerSimplificationCallback(const IRPosition &IRP,
1970                                       const SimplifictionCallbackTy &CB) {
1971     SimplificationCallbacks[IRP].emplace_back(CB);
1972   }
1973 
1974   /// Return true if there is a simplification callback for \p IRP.
1975   bool hasSimplificationCallback(const IRPosition &IRP) {
1976     return SimplificationCallbacks.count(IRP);
1977   }
1978 
1979   /// Register \p CB as a simplification callback.
1980   /// Similar to \p registerSimplificationCallback, the call back will be called
1981   /// first when we simplify a global variable \p GV.
1982   using GlobalVariableSimplifictionCallbackTy =
1983       std::function<std::optional<Constant *>(
1984           const GlobalVariable &, const AbstractAttribute *, bool &)>;
1985   void registerGlobalVariableSimplificationCallback(
1986       const GlobalVariable &GV,
1987       const GlobalVariableSimplifictionCallbackTy &CB) {
1988     GlobalVariableSimplificationCallbacks[&GV].emplace_back(CB);
1989   }
1990 
1991   /// Return true if there is a simplification callback for \p GV.
1992   bool hasGlobalVariableSimplificationCallback(const GlobalVariable &GV) {
1993     return GlobalVariableSimplificationCallbacks.count(&GV);
1994   }
1995 
1996   /// Return \p std::nullopt if there is no call back registered for \p GV or
1997   /// the call back is still not sure if \p GV can be simplified. Return \p
1998   /// nullptr if \p GV can't be simplified.
1999   std::optional<Constant *>
2000   getAssumedInitializerFromCallBack(const GlobalVariable &GV,
2001                                     const AbstractAttribute *AA,
2002                                     bool &UsedAssumedInformation) {
2003     assert(GlobalVariableSimplificationCallbacks.contains(&GV));
2004     for (auto &CB : GlobalVariableSimplificationCallbacks.lookup(&GV)) {
2005       auto SimplifiedGV = CB(GV, AA, UsedAssumedInformation);
2006       // For now we assume the call back will not return a std::nullopt.
2007       assert(SimplifiedGV.has_value() && "SimplifiedGV has not value");
2008       return *SimplifiedGV;
2009     }
2010     llvm_unreachable("there must be a callback registered");
2011   }
2012 
2013   using VirtualUseCallbackTy =
2014       std::function<bool(Attributor &, const AbstractAttribute *)>;
  /// Append \p CB to the virtual-use callbacks registered for \p V.
  void registerVirtualUseCallback(const Value &V,
                                  const VirtualUseCallbackTy &CB) {
    VirtualUseCallbacks[&V].emplace_back(CB);
  }
2019 
2020 private:
2021   /// The vector with all simplification callbacks registered by outside AAs.
2022   DenseMap<IRPosition, SmallVector<SimplifictionCallbackTy, 1>>
2023       SimplificationCallbacks;
2024 
2025   /// The vector with all simplification callbacks for global variables
2026   /// registered by outside AAs.
2027   DenseMap<const GlobalVariable *,
2028            SmallVector<GlobalVariableSimplifictionCallbackTy, 1>>
2029       GlobalVariableSimplificationCallbacks;
2030 
2031   DenseMap<const Value *, SmallVector<VirtualUseCallbackTy, 1>>
2032       VirtualUseCallbacks;
2033 
2034 public:
2035   /// Translate \p V from the callee context into the call site context.
2036   std::optional<Value *>
2037   translateArgumentToCallSiteContent(std::optional<Value *> V, CallBase &CB,
2038                                      const AbstractAttribute &AA,
2039                                      bool &UsedAssumedInformation);
2040 
2041   /// Return true if \p AA (or its context instruction) is assumed dead.
2042   ///
2043   /// If \p LivenessAA is not provided it is queried.
2044   bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA,
2045                      bool &UsedAssumedInformation,
2046                      bool CheckBBLivenessOnly = false,
2047                      DepClassTy DepClass = DepClassTy::OPTIONAL);
2048 
2049   /// Return true if \p I is assumed dead.
2050   ///
2051   /// If \p LivenessAA is not provided it is queried.
2052   bool isAssumedDead(const Instruction &I, const AbstractAttribute *QueryingAA,
2053                      const AAIsDead *LivenessAA, bool &UsedAssumedInformation,
2054                      bool CheckBBLivenessOnly = false,
2055                      DepClassTy DepClass = DepClassTy::OPTIONAL,
2056                      bool CheckForDeadStore = false);
2057 
2058   /// Return true if \p U is assumed dead.
2059   ///
2060   /// If \p FnLivenessAA is not provided it is queried.
2061   bool isAssumedDead(const Use &U, const AbstractAttribute *QueryingAA,
2062                      const AAIsDead *FnLivenessAA, bool &UsedAssumedInformation,
2063                      bool CheckBBLivenessOnly = false,
2064                      DepClassTy DepClass = DepClassTy::OPTIONAL);
2065 
2066   /// Return true if \p IRP is assumed dead.
2067   ///
2068   /// If \p FnLivenessAA is not provided it is queried.
2069   bool isAssumedDead(const IRPosition &IRP, const AbstractAttribute *QueryingAA,
2070                      const AAIsDead *FnLivenessAA, bool &UsedAssumedInformation,
2071                      bool CheckBBLivenessOnly = false,
2072                      DepClassTy DepClass = DepClassTy::OPTIONAL);
2073 
2074   /// Return true if \p BB is assumed dead.
2075   ///
2076   /// If \p LivenessAA is not provided it is queried.
2077   bool isAssumedDead(const BasicBlock &BB, const AbstractAttribute *QueryingAA,
2078                      const AAIsDead *FnLivenessAA,
2079                      DepClassTy DepClass = DepClassTy::OPTIONAL);
2080 
2081   /// Check \p Pred on all (transitive) uses of \p V.
2082   ///
2083   /// This method will evaluate \p Pred on all (transitive) uses of the
2084   /// associated value and return true if \p Pred holds every time.
2085   /// If uses are skipped in favor of equivalent ones, e.g., if we look through
2086   /// memory, the \p EquivalentUseCB will be used to give the caller an idea
  /// which original use was replaced by a new one (or new ones). The visit is
2088   /// cut short if \p EquivalentUseCB returns false and the function will return
2089   /// false as well.
2090   bool checkForAllUses(function_ref<bool(const Use &, bool &)> Pred,
2091                        const AbstractAttribute &QueryingAA, const Value &V,
2092                        bool CheckBBLivenessOnly = false,
2093                        DepClassTy LivenessDepClass = DepClassTy::OPTIONAL,
2094                        bool IgnoreDroppableUses = true,
2095                        function_ref<bool(const Use &OldU, const Use &NewU)>
2096                            EquivalentUseCB = nullptr);
2097 
2098   /// Emit a remark generically.
2099   ///
2100   /// This template function can be used to generically emit a remark. The
2101   /// RemarkKind should be one of the following:
2102   ///   - OptimizationRemark to indicate a successful optimization attempt
2103   ///   - OptimizationRemarkMissed to report a failed optimization attempt
2104   ///   - OptimizationRemarkAnalysis to provide additional information about an
2105   ///     optimization attempt
2106   ///
2107   /// The remark is built using a callback function \p RemarkCB that takes a
2108   /// RemarkKind as input and returns a RemarkKind.
2109   template <typename RemarkKind, typename RemarkCallBack>
2110   void emitRemark(Instruction *I, StringRef RemarkName,
2111                   RemarkCallBack &&RemarkCB) const {
2112     if (!Configuration.OREGetter)
2113       return;
2114 
2115     Function *F = I->getFunction();
2116     auto &ORE = Configuration.OREGetter(F);
2117 
2118     if (RemarkName.startswith("OMP"))
2119       ORE.emit([&]() {
2120         return RemarkCB(RemarkKind(Configuration.PassName, RemarkName, I))
2121                << " [" << RemarkName << "]";
2122       });
2123     else
2124       ORE.emit([&]() {
2125         return RemarkCB(RemarkKind(Configuration.PassName, RemarkName, I));
2126       });
2127   }
2128 
2129   /// Emit a remark on a function.
2130   template <typename RemarkKind, typename RemarkCallBack>
2131   void emitRemark(Function *F, StringRef RemarkName,
2132                   RemarkCallBack &&RemarkCB) const {
2133     if (!Configuration.OREGetter)
2134       return;
2135 
2136     auto &ORE = Configuration.OREGetter(F);
2137 
2138     if (RemarkName.startswith("OMP"))
2139       ORE.emit([&]() {
2140         return RemarkCB(RemarkKind(Configuration.PassName, RemarkName, F))
2141                << " [" << RemarkName << "]";
2142       });
2143     else
2144       ORE.emit([&]() {
2145         return RemarkCB(RemarkKind(Configuration.PassName, RemarkName, F));
2146       });
2147   }
2148 
  /// Helper struct used in the communication between an abstract attribute (AA)
  /// that wants to change the signature of a function and the Attributor which
  /// applies the changes. The struct is partially initialized with the
  /// information from the AA (see the constructor). All other members are
  /// provided by the Attributor prior to invoking any callbacks.
  struct ArgumentReplacementInfo {
    /// Callee repair callback type
    ///
    /// The function repair callback is invoked once to rewire the replacement
    /// arguments in the body of the new function. The argument replacement info
    /// is passed, as built from the registerFunctionSignatureRewrite call, as
    /// well as the replacement function and an iterator to the first
    /// replacement argument.
    using CalleeRepairCBTy = std::function<void(
        const ArgumentReplacementInfo &, Function &, Function::arg_iterator)>;

    /// Abstract call site (ACS) repair callback type
    ///
    /// The abstract call site repair callback is invoked once on every abstract
    /// call site of the replaced function (\see ReplacedFn). The callback needs
    /// to provide the operands for the call to the new replacement function.
    /// The number and type of the operands appended to the provided vector
    /// (second argument) is defined by the number and types determined through
    /// the replacement type vector (\see ReplacementTypes). The first argument
    /// is the ArgumentReplacementInfo object registered with the Attributor
    /// through the registerFunctionSignatureRewrite call.
    using ACSRepairCBTy =
        std::function<void(const ArgumentReplacementInfo &, AbstractCallSite,
                           SmallVectorImpl<Value *> &)>;

    /// Simple getters, see the corresponding members for details.
    ///{

    Attributor &getAttributor() const { return A; }
    const Function &getReplacedFn() const { return ReplacedFn; }
    const Argument &getReplacedArg() const { return ReplacedArg; }
    unsigned getNumReplacementArgs() const { return ReplacementTypes.size(); }
    const SmallVectorImpl<Type *> &getReplacementTypes() const {
      return ReplacementTypes;
    }

    ///}

  private:
    /// Constructor that takes the argument to be replaced, the types of
    /// the replacement arguments, as well as callbacks to repair the call sites
    /// and new function after the replacement happened.
    ArgumentReplacementInfo(Attributor &A, Argument &Arg,
                            ArrayRef<Type *> ReplacementTypes,
                            CalleeRepairCBTy &&CalleeRepairCB,
                            ACSRepairCBTy &&ACSRepairCB)
        : A(A), ReplacedFn(*Arg.getParent()), ReplacedArg(Arg),
          ReplacementTypes(ReplacementTypes.begin(), ReplacementTypes.end()),
          CalleeRepairCB(std::move(CalleeRepairCB)),
          ACSRepairCB(std::move(ACSRepairCB)) {}

    /// Reference to the attributor to allow access from the callbacks.
    Attributor &A;

    /// The "old" function replaced by ReplacementFn.
    const Function &ReplacedFn;

    /// The "old" argument replaced by new ones defined via ReplacementTypes.
    const Argument &ReplacedArg;

    /// The types of the arguments replacing ReplacedArg.
    const SmallVector<Type *, 8> ReplacementTypes;

    /// Callee repair callback, see CalleeRepairCBTy.
    const CalleeRepairCBTy CalleeRepairCB;

    /// Abstract call site (ACS) repair callback, see ACSRepairCBTy.
    const ACSRepairCBTy ACSRepairCB;

    /// Allow access to the private members from the Attributor.
    friend struct Attributor;
  };
2226 
  /// Check if we can rewrite a function signature.
  ///
  /// The argument \p Arg is replaced with new ones defined by the number,
  /// order, and types in \p ReplacementTypes.
  ///
  /// \returns True, if the replacement can be registered, via
  /// registerFunctionSignatureRewrite, false otherwise.
  bool isValidFunctionSignatureRewrite(Argument &Arg,
                                       ArrayRef<Type *> ReplacementTypes);

  /// Register a rewrite for a function signature.
  ///
  /// The argument \p Arg is replaced with new ones defined by the number,
  /// order, and types in \p ReplacementTypes. The rewiring at the call sites is
  /// done through \p ACSRepairCB and at the callee site through
  /// \p CalleeRepairCB. \see ArgumentReplacementInfo for the callback types.
  ///
  /// \returns True, if the replacement was registered, false otherwise.
  bool registerFunctionSignatureRewrite(
      Argument &Arg, ArrayRef<Type *> ReplacementTypes,
      ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
      ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB);
2249 
  /// Check \p Pred on all function call sites.
  ///
  /// This method will evaluate \p Pred on call sites and return
  /// true if \p Pred holds for every call site. However, this is only possible
  /// if all call sites are known, hence the function has internal linkage.
  /// If true is returned, \p UsedAssumedInformation is set if assumed
  /// information was used to skip or simplify potential call sites.
  bool checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                            const AbstractAttribute &QueryingAA,
                            bool RequireAllCallSites,
                            bool &UsedAssumedInformation);

  /// Check \p Pred on all call sites of \p Fn.
  ///
  /// This method will evaluate \p Pred on call sites and return
  /// true if \p Pred holds for every call site. However, this is only possible
  /// if all call sites are known, hence the function has internal linkage.
  /// If true is returned, \p UsedAssumedInformation is set if assumed
  /// information was used to skip or simplify potential call sites.
  bool checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                            const Function &Fn, bool RequireAllCallSites,
                            const AbstractAttribute *QueryingAA,
                            bool &UsedAssumedInformation,
                            bool CheckPotentiallyDead = false);
2274 
  /// Check \p Pred on all values potentially returned by the function
  /// associated with \p QueryingAA.
  ///
  /// This is the context insensitive version, i.e., \p Pred is evaluated on
  /// the potentially returned values directly rather than per call site. The
  /// scope \p S limits which values are considered; \p RecurseForSelectAndPHI
  /// presumably controls whether select and PHI instructions are looked
  /// through (TODO confirm against the implementation).
  bool
  checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
                            const AbstractAttribute &QueryingAA,
                            AA::ValueScope S = AA::ValueScope::Intraprocedural,
                            bool RecurseForSelectAndPHI = true);
2284 
  /// Check \p Pred on all instructions in \p Fn with an opcode present in
  /// \p Opcodes.
  ///
  /// This method will evaluate \p Pred on all instructions with an opcode
  /// present in \p Opcodes and return true if \p Pred holds on all of them.
  bool checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
                               const Function *Fn,
                               const AbstractAttribute &QueryingAA,
                               const ArrayRef<unsigned> &Opcodes,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly = false,
                               bool CheckPotentiallyDead = false);

  /// Check \p Pred on all instructions with an opcode present in \p Opcodes.
  ///
  /// This method will evaluate \p Pred on all instructions with an opcode
  /// present in \p Opcodes and return true if \p Pred holds on all of them.
  bool checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
                               const AbstractAttribute &QueryingAA,
                               const ArrayRef<unsigned> &Opcodes,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly = false,
                               bool CheckPotentiallyDead = false);
2308 
2309   /// Check \p Pred on all call-like instructions (=CallBased derived).
2310   ///
2311   /// See checkForAllCallLikeInstructions(...) for more information.
2312   bool checkForAllCallLikeInstructions(function_ref<bool(Instruction &)> Pred,
2313                                        const AbstractAttribute &QueryingAA,
2314                                        bool &UsedAssumedInformation,
2315                                        bool CheckBBLivenessOnly = false,
2316                                        bool CheckPotentiallyDead = false) {
2317     return checkForAllInstructions(
2318         Pred, QueryingAA,
2319         {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
2320          (unsigned)Instruction::Call},
2321         UsedAssumedInformation, CheckBBLivenessOnly, CheckPotentiallyDead);
2322   }
2323 
  /// Check \p Pred on all Read/Write instructions.
  ///
  /// This method will evaluate \p Pred on all instructions that read or write
  /// to memory present in the information cache and return true if \p Pred
  /// holds on all of them.
  bool checkForAllReadWriteInstructions(function_ref<bool(Instruction &)> Pred,
                                        AbstractAttribute &QueryingAA,
                                        bool &UsedAssumedInformation);

  /// Create a shallow wrapper for \p F such that \p F has internal linkage
  /// afterwards. It also sets the original \p F 's name to anonymous
  ///
  /// A wrapper is a function with the same type (and attributes) as \p F
  /// that will only call \p F and return the result, if any.
  ///
  /// Assuming the declaration of \p F looks like:
  ///   rty F(aty0 arg0, ..., atyN argN);
  ///
  /// The wrapper will then look as follows:
  ///   rty wrapper(aty0 arg0, ..., atyN argN) {
  ///     return F(arg0, ..., argN);
  ///   }
  ///
  static void createShallowWrapper(Function &F);
2348 
  /// Returns true if the function \p F can be internalized. i.e. it has a
  /// compatible linkage.
  static bool isInternalizable(Function &F);

  /// Make another copy of the function \p F such that the copied version has
  /// internal linkage afterwards and can be analysed. Then we replace all uses
  /// of the original function with the copied one
  ///
  /// Only non-locally linked functions that have `linkonce_odr` or `weak_odr`
  /// linkage can be internalized because these linkages guarantee that other
  /// definitions with the same name have the same semantics as this one.
  ///
  /// This will only be run if the `attributor-allow-deep-wrappers` option is
  /// set, or if the function is called with \p Force set to true.
  ///
  /// If the function \p F failed to be internalized the return value will be a
  /// null pointer.
  static Function *internalizeFunction(Function &F, bool Force = false);

  /// Make copies of each function in the set \p FnSet such that the copied
  /// version has internal linkage afterwards and can be analysed. Then we
  /// replace all uses of the original function with the copied one. The map
  /// \p FnMap contains a mapping of functions to their internalized versions.
  ///
  /// Only non-locally linked functions that have `linkonce_odr` or `weak_odr`
  /// linkage can be internalized because these linkages guarantee that other
  /// definitions with the same name have the same semantics as this one.
  ///
  /// This version will internalize all the functions in the set \p FnSet at
  /// once and then replace the uses. This prevents internalized functions being
  /// called by external functions when there is an internalized version in the
  /// module.
  static bool internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
                                   DenseMap<Function *, Function *> &FnMap);

  /// Return the data layout associated with the anchor scope.
  const DataLayout &getDataLayout() const { return InfoCache.DL; }

  /// The allocator used to allocate memory, e.g. for `AbstractAttribute`s.
  BumpPtrAllocator &Allocator;
2389 
private:
  /// This method will do fixpoint iteration until fixpoint or the
  /// maximum iteration count is reached.
  ///
  /// If the maximum iteration count is reached, this method will
  /// indicate pessimistic fixpoint on attributes that transitively depend
  /// on attributes that were scheduled for an update.
  void runTillFixpoint();

  /// Gets called after scheduling, manifests attributes to the LLVM IR.
  ChangeStatus manifestAttributes();

  /// Gets called after attributes have been manifested, cleans up the IR.
  /// Deletes dead functions, blocks and instructions.
  /// Rewrites function signatures and updates the call graph.
  ChangeStatus cleanupIR();

  /// Identify internal functions that are effectively dead, thus not reachable
  /// from a live entry point. The functions are added to ToBeDeletedFunctions.
  void identifyDeadInternalFunctions();

  /// Run `::update` on \p AA and track the dependences queried while doing so.
  /// Also adjust the state if we know further updates are not necessary.
  ChangeStatus updateAA(AbstractAttribute &AA);

  /// Remember the dependences on the top of the dependence stack such that they
  /// may trigger further updates. (\see DependenceStack)
  void rememberDependences();

  /// Determine if CallBase context in \p IRP should be propagated.
  bool shouldPropagateCallBaseContext(const IRPosition &IRP);

  /// Apply all requested function signature rewrites
  /// (\see registerFunctionSignatureRewrite) and return Changed if the module
  /// was altered.
  ChangeStatus
  rewriteFunctionSignatures(SmallSetVector<Function *, 8> &ModifiedFns);

  /// Check if the Attribute \p AA should be seeded.
  /// See getOrCreateAAFor.
  bool shouldSeedAttribute(AbstractAttribute &AA);
2431 
  /// A nested map to lookup abstract attributes based on the argument position
  /// on the outer level, and the addresses of the static member (AAType::ID) on
  /// the inner level.
  ///{
  using AAMapKeyTy = std::pair<const char *, IRPosition>;
  DenseMap<AAMapKeyTy, AbstractAttribute *> AAMap;
  ///}

  /// Map to remember all requested signature changes (= argument replacements).
  DenseMap<Function *, SmallVector<std::unique_ptr<ArgumentReplacementInfo>, 8>>
      ArgumentReplacementMap;

  /// The set of functions we are deriving attributes for.
  SetVector<Function *> &Functions;

  /// The information cache that holds pre-processed (LLVM-IR) information.
  InformationCache &InfoCache;

  /// Abstract Attribute dependency graph
  AADepGraph DG;

  /// Set of functions for which we modified the content such that it might
  /// impact the call graph.
  SmallSetVector<Function *, 8> CGModifiedFunctions;

  /// Information about a dependence. If FromAA is changed ToAA needs to be
  /// updated as well.
  struct DepInfo {
    const AbstractAttribute *FromAA;
    const AbstractAttribute *ToAA;
    DepClassTy DepClass;
  };

  /// The dependence stack is used to track dependences during an
  /// `AbstractAttribute::update` call. As `AbstractAttribute::update` can be
  /// recursive we might have multiple vectors of dependences in here. The stack
  /// size should be adjusted according to the expected recursion depth and the
  /// inner dependence vector size to the expected number of dependences per
  /// abstract attribute. Since the inner vectors are actually allocated on the
  /// stack we can be generous with their size.
  using DependenceVector = SmallVector<DepInfo, 8>;
  SmallVector<DependenceVector *, 16> DependenceStack;

  /// A set to remember the functions we already assume to be live and visited.
  DenseSet<const Function *> VisitedFunctions;

  /// Uses we replace with a new value after manifest is done. We will remove
  /// trivially dead instructions afterwards as well.
  SmallMapVector<Use *, Value *, 32> ToBeChangedUses;

  /// Values we replace with a new value after manifest is done. We will remove
  /// trivially dead instructions afterwards as well.
  SmallMapVector<Value *, PointerIntPair<Value *, 1, bool>, 32>
      ToBeChangedValues;

  /// Instructions we replace with `unreachable` insts after manifest is done.
  SmallSetVector<WeakVH, 16> ToBeChangedToUnreachableInsts;

  /// Invoke instructions with at least a single dead successor block.
  SmallSetVector<WeakVH, 16> InvokeWithDeadSuccessor;

  /// A flag that indicates which stage of the process we are in. Initially, the
  /// phase is SEEDING. Phase is changed in `Attributor::run()`
  enum class AttributorPhase {
    SEEDING,
    UPDATE,
    MANIFEST,
    CLEANUP,
  } Phase = AttributorPhase::SEEDING;

  /// The current initialization chain length. Tracked to avoid stack overflows.
  unsigned InitializationChainLength = 0;

  /// Functions, blocks, and instructions we delete after manifest is done.
  ///
  ///{
  SmallPtrSet<BasicBlock *, 8> ManifestAddedBlocks;
  SmallSetVector<Function *, 8> ToBeDeletedFunctions;
  SmallSetVector<BasicBlock *, 8> ToBeDeletedBlocks;
  SmallSetVector<WeakVH, 8> ToBeDeletedInsts;
  ///}

  /// Container with all the query AAs that requested an update via
  /// registerForUpdate.
  SmallSetVector<AbstractAttribute *, 16> QueryAAsAwaitingUpdate;

  /// User provided configuration for this Attributor instance.
  const AttributorConfig Configuration;

  friend AADepGraph;
  friend AttributorCallGraph;
};
2524 
/// An interface to query the internal state of an abstract attribute.
///
/// The abstract state is a minimal interface that allows the Attributor to
/// communicate with the abstract attributes about their internal state without
/// enforcing or exposing implementation details, e.g., the (existence of an)
/// underlying lattice.
///
/// It is sufficient to be able to query if a state is (1) valid or invalid, (2)
/// at a fixpoint, and to indicate to the state that (3) an optimistic fixpoint
/// was reached or (4) a pessimistic fixpoint was enforced.
///
/// All methods need to be implemented by the subclass. For the common use case,
/// a single boolean state or a bit-encoded state, the BooleanState and
/// {Inc,Dec,Bit}IntegerState classes are already provided. An abstract
/// attribute can inherit from them to get the abstract state interface and
/// additional methods to directly modify the state if needed. See the
/// class comments for help.
struct AbstractState {
  virtual ~AbstractState() = default;

  /// Return if this abstract state is in a valid state. If false, no
  /// information provided should be used.
  virtual bool isValidState() const = 0;

  /// Return if this abstract state is fixed, thus does not need to be updated
  /// if information changes as it cannot change itself.
  virtual bool isAtFixpoint() const = 0;

  /// Indicate that the abstract state should converge to the optimistic state.
  ///
  /// This will usually make the optimistically assumed state the known to be
  /// true state.
  ///
  /// \returns ChangeStatus::UNCHANGED as the assumed value should not change.
  virtual ChangeStatus indicateOptimisticFixpoint() = 0;

  /// Indicate that the abstract state should converge to the pessimistic state.
  ///
  /// This will usually revert the optimistically assumed state to the known to
  /// be true state.
  ///
  /// \returns ChangeStatus::CHANGED as the assumed value may change.
  virtual ChangeStatus indicatePessimisticFixpoint() = 0;
};
2569 
/// Simple state with integers encoding.
///
/// The interface ensures that the assumed bits are always a subset of the known
/// bits. Users can only add known bits and, except through adding known bits,
/// they can only remove assumed bits. This should guarantee monotonicity and
/// thereby the existence of a fixpoint (if used correctly). The fixpoint is
/// reached when the assumed and known state/bits are equal. Users can
/// force/indicate a fixpoint. If an optimistic one is indicated, the known
/// state will catch up with the assumed one, for a pessimistic fixpoint it is
/// the other way around.
template <typename base_ty, base_ty BestState, base_ty WorstState>
struct IntegerStateBase : public AbstractState {
  using base_t = base_ty;

  IntegerStateBase() = default;
  IntegerStateBase(base_t Assumed) : Assumed(Assumed) {}

  /// Return the best possible representable state.
  static constexpr base_t getBestState() { return BestState; }
  static constexpr base_t getBestState(const IntegerStateBase &) {
    return getBestState();
  }

  /// Return the worst possible representable state.
  static constexpr base_t getWorstState() { return WorstState; }
  static constexpr base_t getWorstState(const IntegerStateBase &) {
    return getWorstState();
  }

  /// See AbstractState::isValidState()
  /// NOTE: For now we simply pretend that the worst possible state is invalid.
  bool isValidState() const override { return Assumed != getWorstState(); }

  /// See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override { return Assumed == Known; }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    Known = Assumed;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    Assumed = Known;
    return ChangeStatus::CHANGED;
  }

  /// Return the known state encoding
  base_t getKnown() const { return Known; }

  /// Return the assumed state encoding.
  base_t getAssumed() const { return Assumed; }

  /// Equality for IntegerStateBase.
  bool
  operator==(const IntegerStateBase<base_t, BestState, WorstState> &R) const {
    return this->getAssumed() == R.getAssumed() &&
           this->getKnown() == R.getKnown();
  }

  /// Inequality for IntegerStateBase.
  bool
  operator!=(const IntegerStateBase<base_t, BestState, WorstState> &R) const {
    return !(*this == R);
  }

  /// "Clamp" this state with \p R. The result is subtype dependent but it is
  /// intended that only information assumed in both states will be assumed in
  /// this one afterwards.
  void operator^=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
    handleNewAssumedValue(R.getAssumed());
  }

  /// "Clamp" this state with \p R. The result is subtype dependent but it is
  /// intended that information known in either state will be known in
  /// this one afterwards.
  void operator+=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
    handleNewKnownValue(R.getKnown());
  }

  /// Join this state with \p R using an OR combination, see joinOR.
  void operator|=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
    joinOR(R.getAssumed(), R.getKnown());
  }

  /// Join this state with \p R using an AND combination, see joinAND.
  void operator&=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
    joinAND(R.getAssumed(), R.getKnown());
  }

protected:
  /// Handle a new assumed value \p Value. Subtype dependent.
  virtual void handleNewAssumedValue(base_t Value) = 0;

  /// Handle a new known value \p Value. Subtype dependent.
  virtual void handleNewKnownValue(base_t Value) = 0;

  /// Join with \p AssumedValue and \p KnownValue using an OR combination.
  /// Subtype dependent.
  virtual void joinOR(base_t AssumedValue, base_t KnownValue) = 0;

  /// Join with \p AssumedValue and \p KnownValue using an AND combination.
  /// Subtype dependent.
  virtual void joinAND(base_t AssumedValue, base_t KnownValue) = 0;

  /// The known state encoding in an integer of type base_t.
  base_t Known = getWorstState();

  /// The assumed state encoding in an integer of type base_t.
  base_t Assumed = getBestState();
};
2678 
2679 /// Specialization of the integer state for a bit-wise encoding.
2680 template <typename base_ty = uint32_t, base_ty BestState = ~base_ty(0),
2681           base_ty WorstState = 0>
2682 struct BitIntegerState
2683     : public IntegerStateBase<base_ty, BestState, WorstState> {
2684   using super = IntegerStateBase<base_ty, BestState, WorstState>;
2685   using base_t = base_ty;
2686   BitIntegerState() = default;
2687   BitIntegerState(base_t Assumed) : super(Assumed) {}
2688 
2689   /// Return true if the bits set in \p BitsEncoding are "known bits".
2690   bool isKnown(base_t BitsEncoding = BestState) const {
2691     return (this->Known & BitsEncoding) == BitsEncoding;
2692   }
2693 
2694   /// Return true if the bits set in \p BitsEncoding are "assumed bits".
2695   bool isAssumed(base_t BitsEncoding = BestState) const {
2696     return (this->Assumed & BitsEncoding) == BitsEncoding;
2697   }
2698 
2699   /// Add the bits in \p BitsEncoding to the "known bits".
2700   BitIntegerState &addKnownBits(base_t Bits) {
2701     // Make sure we never miss any "known bits".
2702     this->Assumed |= Bits;
2703     this->Known |= Bits;
2704     return *this;
2705   }
2706 
2707   /// Remove the bits in \p BitsEncoding from the "assumed bits" if not known.
2708   BitIntegerState &removeAssumedBits(base_t BitsEncoding) {
2709     return intersectAssumedBits(~BitsEncoding);
2710   }
2711 
2712   /// Remove the bits in \p BitsEncoding from the "known bits".
2713   BitIntegerState &removeKnownBits(base_t BitsEncoding) {
2714     this->Known = (this->Known & ~BitsEncoding);
2715     return *this;
2716   }
2717 
2718   /// Keep only "assumed bits" also set in \p BitsEncoding but all known ones.
2719   BitIntegerState &intersectAssumedBits(base_t BitsEncoding) {
2720     // Make sure we never lose any "known bits".
2721     this->Assumed = (this->Assumed & BitsEncoding) | this->Known;
2722     return *this;
2723   }
2724 
2725 private:
2726   void handleNewAssumedValue(base_t Value) override {
2727     intersectAssumedBits(Value);
2728   }
2729   void handleNewKnownValue(base_t Value) override { addKnownBits(Value); }
2730   void joinOR(base_t AssumedValue, base_t KnownValue) override {
2731     this->Known |= KnownValue;
2732     this->Assumed |= AssumedValue;
2733   }
2734   void joinAND(base_t AssumedValue, base_t KnownValue) override {
2735     this->Known &= KnownValue;
2736     this->Assumed &= AssumedValue;
2737   }
2738 };
2739 
/// Specialization of the integer state for an increasing value, hence ~0u is
/// the best state and 0 the worst.
template <typename base_ty = uint32_t, base_ty BestState = ~base_ty(0),
          base_ty WorstState = 0>
struct IncIntegerState
    : public IntegerStateBase<base_ty, BestState, WorstState> {
  using super = IntegerStateBase<base_ty, BestState, WorstState>;
  using base_t = base_ty;

  IncIntegerState() : super() {}
  IncIntegerState(base_t Assumed) : super(Assumed) {}

  /// Return the best possible representable state.
  static constexpr base_t getBestState() { return BestState; }
  static constexpr base_t
  getBestState(const IncIntegerState<base_ty, BestState, WorstState> &) {
    return getBestState();
  }

  /// Take minimum of assumed and \p Value.
  IncIntegerState &takeAssumedMinimum(base_t Value) {
    // Make sure we never lose "known value".
    this->Assumed = std::max(std::min(this->Assumed, Value), this->Known);
    return *this;
  }

  /// Take maximum of known and \p Value.
  IncIntegerState &takeKnownMaximum(base_t Value) {
    // Make sure we never lose "known value".
    this->Assumed = std::max(Value, this->Assumed);
    this->Known = std::max(Value, this->Known);
    return *this;
  }

private:
  // A new assumed value can only lower the assumed state, but never below the
  // known state, see takeAssumedMinimum.
  void handleNewAssumedValue(base_t Value) override {
    takeAssumedMinimum(Value);
  }
  // A new known value can only raise the known state (and the assumed state
  // with it), see takeKnownMaximum.
  void handleNewKnownValue(base_t Value) override { takeKnownMaximum(Value); }
  // "Or" keeps the larger (= better, since the value is increasing) of the
  // two states for both known and assumed.
  void joinOR(base_t AssumedValue, base_t KnownValue) override {
    this->Known = std::max(this->Known, KnownValue);
    this->Assumed = std::max(this->Assumed, AssumedValue);
  }
  // "And" keeps the smaller (= more pessimistic) of the two states.
  void joinAND(base_t AssumedValue, base_t KnownValue) override {
    this->Known = std::min(this->Known, KnownValue);
    this->Assumed = std::min(this->Assumed, AssumedValue);
  }
};
2788 
/// Specialization of the integer state for a decreasing value, hence 0 is the
/// best state and ~0u the worst.
template <typename base_ty = uint32_t>
struct DecIntegerState : public IntegerStateBase<base_ty, 0, ~base_ty(0)> {
  using base_t = base_ty;

  /// Take maximum of assumed and \p Value.
  DecIntegerState &takeAssumedMaximum(base_t Value) {
    // Make sure we never lose "known value".
    this->Assumed = std::min(std::max(this->Assumed, Value), this->Known);
    return *this;
  }

  /// Take minimum of known and \p Value.
  DecIntegerState &takeKnownMinimum(base_t Value) {
    // Make sure we never lose "known value".
    this->Assumed = std::min(Value, this->Assumed);
    this->Known = std::min(Value, this->Known);
    return *this;
  }

private:
  // A new assumed value can only raise the assumed state (towards the worst
  // state ~0u), but never past the known state, see takeAssumedMaximum.
  void handleNewAssumedValue(base_t Value) override {
    takeAssumedMaximum(Value);
  }
  // A new known value can only lower the known state (and the assumed state
  // with it), see takeKnownMinimum.
  void handleNewKnownValue(base_t Value) override { takeKnownMinimum(Value); }
  // "Or" keeps the smaller (= better, since the value is decreasing) value.
  // NOTE(review): unlike IncIntegerState, both joins here only update the
  // assumed state and leave Known untouched — confirm this asymmetry is
  // intended.
  void joinOR(base_t AssumedValue, base_t KnownValue) override {
    this->Assumed = std::min(this->Assumed, KnownValue);
    this->Assumed = std::min(this->Assumed, AssumedValue);
  }
  // "And" keeps the larger (= more pessimistic) value.
  void joinAND(base_t AssumedValue, base_t KnownValue) override {
    this->Assumed = std::max(this->Assumed, KnownValue);
    this->Assumed = std::max(this->Assumed, AssumedValue);
  }
};
2824 
/// Simple wrapper for a single bit (boolean) state.
struct BooleanState : public IntegerStateBase<bool, true, false> {
  using super = IntegerStateBase<bool, true, false>;
  using base_t = IntegerStateBase::base_t;

  BooleanState() = default;
  BooleanState(base_t Assumed) : super(Assumed) {}

  /// Set the assumed value to \p Value but never below the known one.
  void setAssumed(bool Value) { Assumed &= (Known | Value); }

  /// Set the known and assumed value to \p Value.
  void setKnown(bool Value) {
    Known |= Value;
    Assumed |= Value;
  }

  /// Return true if the state is assumed to hold.
  bool isAssumed() const { return getAssumed(); }

  /// Return true if the state is known to hold.
  bool isKnown() const { return getKnown(); }

private:
  // A new assumed "false" collapses the assumed state onto the known one; a
  // new assumed "true" cannot improve the assumed state and is a no-op.
  void handleNewAssumedValue(base_t Value) override {
    if (!Value)
      Assumed = Known;
  }
  // A new known "true" fixes both states to true; a known "false" adds no
  // information.
  void handleNewKnownValue(base_t Value) override {
    if (Value)
      Known = (Assumed = Value);
  }
  void joinOR(base_t AssumedValue, base_t KnownValue) override {
    Known |= KnownValue;
    Assumed |= AssumedValue;
  }
  void joinAND(base_t AssumedValue, base_t KnownValue) override {
    Known &= KnownValue;
    Assumed &= AssumedValue;
  }
};
2866 
/// State for an integer range.
struct IntegerRangeState : public AbstractState {

  /// Bitwidth of the associated value.
  uint32_t BitWidth;

  /// State representing assumed range, initially set to empty.
  ConstantRange Assumed;

  /// State representing known range, initially set to [-inf, inf].
  ConstantRange Known;

  /// Create a state for a value of \p BitWidth bits with the best assumed
  /// (empty) and the worst known (full) range.
  IntegerRangeState(uint32_t BitWidth)
      : BitWidth(BitWidth), Assumed(ConstantRange::getEmpty(BitWidth)),
        Known(ConstantRange::getFull(BitWidth)) {}

  /// Create a state with \p CR as the assumed range and the worst (full)
  /// known range.
  IntegerRangeState(const ConstantRange &CR)
      : BitWidth(CR.getBitWidth()), Assumed(CR),
        Known(getWorstState(CR.getBitWidth())) {}

  /// Return the worst possible representable state.
  static ConstantRange getWorstState(uint32_t BitWidth) {
    return ConstantRange::getFull(BitWidth);
  }

  /// Return the best possible representable state.
  static ConstantRange getBestState(uint32_t BitWidth) {
    return ConstantRange::getEmpty(BitWidth);
  }
  static ConstantRange getBestState(const IntegerRangeState &IRS) {
    return getBestState(IRS.getBitWidth());
  }

  /// Return associated values' bit width.
  uint32_t getBitWidth() const { return BitWidth; }

  /// See AbstractState::isValidState()
  bool isValidState() const override {
    // A full assumed range carries no information and is hence invalid.
    return BitWidth > 0 && !Assumed.isFullSet();
  }

  /// See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override { return Assumed == Known; }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    Known = Assumed;
    return ChangeStatus::CHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    Assumed = Known;
    return ChangeStatus::CHANGED;
  }

  /// Return the known state encoding
  ConstantRange getKnown() const { return Known; }

  /// Return the assumed state encoding.
  ConstantRange getAssumed() const { return Assumed; }

  /// Unite assumed range with the passed state.
  void unionAssumed(const ConstantRange &R) {
    // Don't lose a known range.
    Assumed = Assumed.unionWith(R).intersectWith(Known);
  }

  /// See IntegerRangeState::unionAssumed(..).
  void unionAssumed(const IntegerRangeState &R) {
    unionAssumed(R.getAssumed());
  }

  /// Intersect known range with the passed state.
  void intersectKnown(const ConstantRange &R) {
    // Keep the assumed range a subset of the known one.
    Assumed = Assumed.intersectWith(R);
    Known = Known.intersectWith(R);
  }

  /// See IntegerRangeState::intersectKnown(..).
  void intersectKnown(const IntegerRangeState &R) {
    intersectKnown(R.getKnown());
  }

  /// Equality for IntegerRangeState.
  bool operator==(const IntegerRangeState &R) const {
    return getAssumed() == R.getAssumed() && getKnown() == R.getKnown();
  }

  /// "Clamp" this state with \p R. The result is subtype dependent but it is
  /// intended that only information assumed in both states will be assumed in
  /// this one afterwards. Note that a copy of the updated state is returned.
  IntegerRangeState operator^=(const IntegerRangeState &R) {
    // NOTE: `^=` operator seems like `intersect` but in this case, we need to
    // take `union`.
    unionAssumed(R);
    return *this;
  }

  IntegerRangeState operator&=(const IntegerRangeState &R) {
    // NOTE: `&=` operator seems like `intersect` but in this case, we need to
    // take `union`.
    Known = Known.unionWith(R.getKnown());
    Assumed = Assumed.unionWith(R.getAssumed());
    return *this;
  }
};
2974 
/// Simple state for a set.
///
/// This represents a state containing a set of values. The interface supports
/// modelling sets that contain all possible elements. The state's internal
/// value is modified using union or intersection operations.
template <typename BaseTy> struct SetState : public AbstractState {
  /// A wrapper around a set that has semantics for handling unions and
  /// intersections with a "universal" set that contains all elements.
  struct SetContents {
    /// Creates a universal set with no concrete elements or an empty set.
    SetContents(bool Universal) : Universal(Universal) {}

    /// Creates a non-universal set with concrete values.
    SetContents(const DenseSet<BaseTy> &Assumptions)
        : Universal(false), Set(Assumptions) {}

    /// Creates a set with the given concrete values and universality flag.
    SetContents(bool Universal, const DenseSet<BaseTy> &Assumptions)
        : Universal(Universal), Set(Assumptions) {}

    const DenseSet<BaseTy> &getSet() const { return Set; }

    bool isUniversal() const { return Universal; }

    bool empty() const { return Set.empty() && !Universal; }

    /// Finds A := A ^ B where A or B could be the "Universal" set which
    /// contains every possible attribute. Returns true if changes were made.
    bool getIntersection(const SetContents &RHS) {
      bool IsUniversal = Universal;
      unsigned Size = Set.size();

      // A := A ^ U = A
      if (RHS.isUniversal())
        return false;

      // A := U ^ B = B
      if (Universal)
        Set = RHS.getSet();
      else
        set_intersect(Set, RHS.getSet());

      Universal &= RHS.isUniversal();
      return IsUniversal != Universal || Size != Set.size();
    }

    /// Finds A := A u B where A or B could be the "Universal" set which
    /// contains every possible attribute. Returns true if changes were made.
    bool getUnion(const SetContents &RHS) {
      bool IsUniversal = Universal;
      unsigned Size = Set.size();

      // A := A u U = U = U u B
      if (!RHS.isUniversal() && !Universal)
        set_union(Set, RHS.getSet());

      Universal |= RHS.isUniversal();
      return IsUniversal != Universal || Size != Set.size();
    }

  private:
    /// Indicates if this set is "universal", containing every possible element.
    bool Universal;

    /// The set of currently active assumptions.
    DenseSet<BaseTy> Set;
  };

  /// Creates a state with an empty known set and a universal assumed set.
  SetState() : Known(false), Assumed(true), IsAtFixedpoint(false) {}

  /// Initializes the known state with an initial set and initializes the
  /// assumed state as universal.
  SetState(const DenseSet<BaseTy> &Known)
      : Known(Known), Assumed(true), IsAtFixedpoint(false) {}

  /// See AbstractState::isValidState()
  bool isValidState() const override { return !Assumed.empty(); }

  /// See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override { return IsAtFixedpoint; }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    IsAtFixedpoint = true;
    Known = Assumed;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    IsAtFixedpoint = true;
    Assumed = Known;
    return ChangeStatus::CHANGED;
  }

  /// Return the known state encoding.
  const SetContents &getKnown() const { return Known; }

  /// Return the assumed state encoding.
  const SetContents &getAssumed() const { return Assumed; }

  /// Returns if the set state contains the element.
  bool setContains(const BaseTy &Elem) const {
    return Assumed.getSet().contains(Elem) || Known.getSet().contains(Elem);
  }

  /// Performs the set intersection between this set and \p RHS. Returns true if
  /// changes were made.
  bool getIntersection(const SetContents &RHS) {
    bool IsUniversal = Assumed.isUniversal();
    unsigned SizeBefore = Assumed.getSet().size();

    // Get intersection and make sure that the known set is still a proper
    // subset of the assumed set. A := K u (A ^ R).
    Assumed.getIntersection(RHS);
    Assumed.getUnion(Known);

    return SizeBefore != Assumed.getSet().size() ||
           IsUniversal != Assumed.isUniversal();
  }

  /// Performs the set union between this set and \p RHS. Returns true if
  /// changes were made.
  bool getUnion(const SetContents &RHS) { return Assumed.getUnion(RHS); }

private:
  /// The set of values known for this state.
  SetContents Known;

  /// The set of assumed values for this state.
  SetContents Assumed;

  /// Set once a (optimistic or pessimistic) fixpoint was indicated.
  bool IsAtFixedpoint;
};
3108 
/// Helper to tie an abstract state implementation to an abstract attribute.
template <typename StateTy, typename BaseType, class... Ts>
struct StateWrapper : public BaseType, public StateTy {
  /// Provide static access to the type of the state.
  using StateType = StateTy;

  /// Forward \p IRP to the attribute base class and \p Args to the state.
  StateWrapper(const IRPosition &IRP, Ts... Args)
      : BaseType(IRP), StateTy(Args...) {}

  /// See AbstractAttribute::getState(...).
  StateType &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const StateType &getState() const override { return *this; }
};
3124 
/// Helper class that provides common functionality to manifest IR attributes.
template <Attribute::AttrKind AK, typename BaseType, typename AAType>
struct IRAttribute : public BaseType {
  IRAttribute(const IRPosition &IRP) : BaseType(IRP) {}

  /// Most boolean IRAttribute AAs don't do anything non-trivial
  /// in their initializers while non-boolean ones often do. Subclasses can
  /// change this.
  static bool hasTrivialInitializer() { return Attribute::isEnumAttrKind(AK); }

  /// Compile time access to the IR attribute kind.
  static constexpr Attribute::AttrKind IRAttributeKind = AK;

  /// Return true if the IR attribute(s) associated with this AA are implied for
  /// an undef value.
  static bool isImpliedByUndef() { return true; }

  /// Return true if the IR attribute(s) associated with this AA are implied for
  /// a poison value.
  static bool isImpliedByPoison() { return true; }

  /// Return true if \p ImpliedAttributeKind is implied by the IR at \p IRP,
  /// i.e., the associated value is undef/poison (if the AA allows that) or
  /// the attribute is already present.
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind = AK,
                            bool IgnoreSubsumingPositions = false) {
    if (AAType::isImpliedByUndef() && isa<UndefValue>(IRP.getAssociatedValue()))
      return true;
    if (AAType::isImpliedByPoison() &&
        isa<PoisonValue>(IRP.getAssociatedValue()))
      return true;
    return A.hasAttr(IRP, {ImpliedAttributeKind}, IgnoreSubsumingPositions,
                     ImpliedAttributeKind);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Nothing to manifest for undef values.
    if (isa<UndefValue>(this->getIRPosition().getAssociatedValue()))
      return ChangeStatus::UNCHANGED;
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(A, this->getAnchorValue().getContext(), DeducedAttrs);
    if (DeducedAttrs.empty())
      return ChangeStatus::UNCHANGED;
    return A.manifestAttrs(this->getIRPosition(), DeducedAttrs);
  }

  /// Return the kind that identifies the abstract attribute implementation.
  Attribute::AttrKind getAttrKind() const { return AK; }

  /// Return the deduced attributes in \p Attrs.
  virtual void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
                                    SmallVectorImpl<Attribute> &Attrs) const {
    Attrs.emplace_back(Attribute::get(Ctx, getAttrKind()));
  }
};
3178 
3179 /// Base struct for all "concrete attribute" deductions.
3180 ///
3181 /// The abstract attribute is a minimal interface that allows the Attributor to
3182 /// orchestrate the abstract/fixpoint analysis. The design allows to hide away
3183 /// implementation choices made for the subclasses but also to structure their
3184 /// implementation and simplify the use of other abstract attributes in-flight.
3185 ///
3186 /// To allow easy creation of new attributes, most methods have default
3187 /// implementations. The ones that do not are generally straight forward, except
3188 /// `AbstractAttribute::updateImpl` which is the location of most reasoning
3189 /// associated with the abstract attribute. The update is invoked by the
3190 /// Attributor in case the situation used to justify the current optimistic
3191 /// state might have changed. The Attributor determines this automatically
3192 /// by monitoring the `Attributor::getAAFor` calls made by abstract attributes.
3193 ///
3194 /// The `updateImpl` method should inspect the IR and other abstract attributes
3195 /// in-flight to justify the best possible (=optimistic) state. The actual
3196 /// implementation is, similar to the underlying abstract state encoding, not
3197 /// exposed. In the most common case, the `updateImpl` will go through a list of
3198 /// reasons why its optimistic state is valid given the current information. If
3199 /// any combination of them holds and is sufficient to justify the current
/// optimistic state, the method shall return UNCHANGED. If not, the optimistic
3201 /// state is adjusted to the situation and the method shall return CHANGED.
3202 ///
3203 /// If the manifestation of the "concrete attribute" deduced by the subclass
3204 /// differs from the "default" behavior, which is a (set of) LLVM-IR
3205 /// attribute(s) for an argument, call site argument, function return value, or
3206 /// function, the `AbstractAttribute::manifest` method should be overloaded.
3207 ///
3208 /// NOTE: If the state obtained via getState() is INVALID, thus if
3209 ///       AbstractAttribute::getState().isValidState() returns false, no
3210 ///       information provided by the methods of this class should be used.
3211 /// NOTE: The Attributor currently has certain limitations to what we can do.
3212 ///       As a general rule of thumb, "concrete" abstract attributes should *for
3213 ///       now* only perform "backward" information propagation. That means
3214 ///       optimistic information obtained through abstract attributes should
3215 ///       only be used at positions that precede the origin of the information
3216 ///       with regards to the program flow. More practically, information can
3217 ///       *now* be propagated from instructions to their enclosing function, but
3218 ///       *not* from call sites to the called function. The mechanisms to allow
3219 ///       both directions will be added in the future.
3220 /// NOTE: The mechanics of adding a new "concrete" abstract attribute are
3221 ///       described in the file comment.
struct AbstractAttribute : public IRPosition, public AADepGraphNode {
  using StateType = AbstractState;

  AbstractAttribute(const IRPosition &IRP) : IRPosition(IRP) {}

  /// Virtual destructor.
  virtual ~AbstractAttribute() = default;

  /// This function is used to identify if an \p DGN is of type
  /// AbstractAttribute so that the dyn_cast and cast can use such information
  /// to cast an AADepGraphNode to an AbstractAttribute.
  ///
  /// We eagerly return true here because all AADepGraphNodes except for the
  /// synthesis node are of type AbstractAttribute
  static bool classof(const AADepGraphNode *DGN) { return true; }

  /// Return false if this AA does anything non-trivial (hence not done by
  /// default) in its initializer.
  static bool hasTrivialInitializer() { return false; }

  /// Return true if this AA requires a "callee" (or an associated function) for
  /// a call site position. Default is optimistic to minimize AAs.
  static bool requiresCalleeForCallBase() { return true; }

  /// Return true if this AA requires all callees for an argument or function
  /// position.
  static bool requiresCallersForArgOrFunction() { return false; }

  /// Return false if an AA should not be created for \p IRP.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    return true;
  }

  /// Return false if an AA should not be updated for \p IRP.
  static bool isValidIRPositionForUpdate(Attributor &A, const IRPosition &IRP) {
    Function *AssociatedFn = IRP.getAssociatedFunction();
    bool IsFnInterface = IRP.isFnInterfaceKind();
    assert((!IsFnInterface || AssociatedFn) &&
           "Function interface without a function?");

    // TODO: Not all attributes require an exact definition. Find a way to
    //       enable deduction for some but not all attributes in case the
    //       definition might be changed at runtime, see also
    //       http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html.
    // TODO: We could always determine abstract attributes and if sufficient
    //       information was found we could duplicate the functions that do not
    //       have an exact definition.
    return !IsFnInterface || A.isFunctionIPOAmendable(*AssociatedFn);
  }

  /// Initialize the state with the information in the Attributor \p A.
  ///
  /// This function is called by the Attributor once all abstract attributes
  /// have been identified. It can and shall be used for task like:
  ///  - identify existing knowledge in the IR and use it for the "known state"
  ///  - perform any work that is not going to change over time, e.g., determine
  ///    a subset of the IR, or attributes in-flight, that have to be looked at
  ///    in the `updateImpl` method.
  virtual void initialize(Attributor &A) {}

  /// A query AA is always scheduled as long as we do updates because it does
  /// lazy computation that cannot be determined to be done from the outside.
  /// However, while query AAs will not be fixed if they do not have outstanding
  /// dependences, we will only schedule them like other AAs. If a query AA that
  /// received a new query it needs to request an update via
  /// `Attributor::requestUpdateForAA`.
  virtual bool isQueryAA() const { return false; }

  /// Return the internal abstract state for inspection.
  virtual StateType &getState() = 0;
  virtual const StateType &getState() const = 0;

  /// Return an IR position, see struct IRPosition.
  const IRPosition &getIRPosition() const { return *this; };
  IRPosition &getIRPosition() { return *this; };

  /// Helper functions, for debug purposes only.
  ///{
  void print(raw_ostream &OS) const { print(nullptr, OS); }
  void print(Attributor *, raw_ostream &OS) const override;
  virtual void printWithDeps(raw_ostream &OS) const;
  void dump() const { this->print(dbgs()); }

  /// This function should return the "summarized" assumed state as string.
  virtual const std::string getAsStr(Attributor *A) const = 0;

  /// This function should return the name of the AbstractAttribute
  virtual const std::string getName() const = 0;

  /// This function should return the address of the ID of the AbstractAttribute
  virtual const char *getIdAddr() const = 0;
  ///}

  /// Allow the Attributor access to the protected methods.
  friend struct Attributor;

protected:
  /// Hook for the Attributor to trigger an update of the internal state.
  ///
  /// If this attribute is already fixed, this method will return UNCHANGED,
  /// otherwise it delegates to `AbstractAttribute::updateImpl`.
  ///
  /// \Return CHANGED if the internal state changed, otherwise UNCHANGED.
  ChangeStatus update(Attributor &A);

  /// Hook for the Attributor to trigger the manifestation of the information
  /// represented by the abstract attribute in the LLVM-IR.
  ///
  /// \Return CHANGED if the IR was altered, otherwise UNCHANGED.
  virtual ChangeStatus manifest(Attributor &A) {
    return ChangeStatus::UNCHANGED;
  }

  /// Hook to enable custom statistic tracking, called after manifest that
  /// resulted in a change if statistics are enabled.
  ///
  /// We require subclasses to provide an implementation so we remember to
  /// add statistics for them.
  virtual void trackStatistics() const = 0;

  /// The actual update/transfer function which has to be implemented by the
  /// derived classes.
  ///
  /// If it is called, the environment has changed and we have to determine if
  /// the current information is still valid or adjust it otherwise.
  ///
  /// \Return CHANGED if the internal state changed, otherwise UNCHANGED.
  virtual ChangeStatus updateImpl(Attributor &A) = 0;
};
3351 
/// Forward declarations of output streams for debug purposes.
///
///{
raw_ostream &operator<<(raw_ostream &OS, const AbstractAttribute &AA);
raw_ostream &operator<<(raw_ostream &OS, ChangeStatus S);
raw_ostream &operator<<(raw_ostream &OS, IRPosition::Kind);
raw_ostream &operator<<(raw_ostream &OS, const IRPosition &);
raw_ostream &operator<<(raw_ostream &OS, const AbstractState &State);
/// Print an integer state as "(<known>-<assumed>)" followed by the generic
/// abstract state output.
template <typename base_ty, base_ty BestState, base_ty WorstState>
raw_ostream &
operator<<(raw_ostream &OS,
           const IntegerStateBase<base_ty, BestState, WorstState> &S) {
  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
            << static_cast<const AbstractState &>(S);
}
raw_ostream &operator<<(raw_ostream &OS, const IntegerRangeState &State);
///}
3369 
/// New pass manager pass that runs the Attributor over a whole module.
struct AttributorPass : public PassInfoMixin<AttributorPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// New pass manager pass that runs the Attributor over a call graph SCC.
struct AttributorCGSCCPass : public PassInfoMixin<AttributorCGSCCPass> {
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
};
3377 
3378 /// Helper function to clamp a state \p S of type \p StateType with the
3379 /// information in \p R and indicate/return if \p S did change (as-in update is
3380 /// required to be run again).
3381 template <typename StateType>
3382 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
3383   auto Assumed = S.getAssumed();
3384   S ^= R;
3385   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
3386                                    : ChangeStatus::CHANGED;
3387 }
3388 
3389 /// ----------------------------------------------------------------------------
3390 ///                       Abstract Attribute Classes
3391 /// ----------------------------------------------------------------------------
3392 
/// An abstract interface for all nounwind attributes.
struct AANoUnwind
    : public IRAttribute<Attribute::NoUnwind,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoUnwind> {
  AANoUnwind(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Returns true if nounwind is assumed.
  bool isAssumedNoUnwind() const { return getAssumed(); }

  /// Returns true if nounwind is known.
  bool isKnownNoUnwind() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoUnwind &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoUnwind"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoUnwind
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3422 
/// An abstract interface for all nosync attributes.
struct AANoSync
    : public IRAttribute<Attribute::NoSync,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoSync> {
  AANoSync(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    // Only function scopes and pointer(-vector) values are eligible.
    if (!IRP.isFunctionScope() &&
        !IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Returns true if "nosync" is assumed.
  bool isAssumedNoSync() const { return getAssumed(); }

  /// Returns true if "nosync" is known.
  bool isKnownNoSync() const { return getKnown(); }

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, if an atomic instruction does not have unordered
  /// or monotonic ordering
  static bool isNonRelaxedAtomic(const Instruction *I);

  /// Helper function specific for intrinsics which are potentially volatile.
  static bool isNoSyncIntrinsic(const Instruction *I);

  /// Helper function to determine if \p CB is an aligned (GPU) barrier. Aligned
  /// barriers have to be executed by all threads. The flag \p ExecutedAligned
  /// indicates if the call is executed by all threads in a (thread) block in an
  /// aligned way. If that is the case, non-aligned barriers are effectively
  /// aligned barriers.
  static bool isAlignedBarrier(const CallBase &CB, bool ExecutedAligned);

  /// Create an abstract attribute view for the position \p IRP.
  static AANoSync &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoSync"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoSync
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3475 
/// An abstract interface for all mustprogress attributes.
struct AAMustProgress
    : public IRAttribute<Attribute::MustProgress,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AAMustProgress> {
  AAMustProgress(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See IRAttribute::isImpliedByIR. "willreturn" implies "mustprogress".
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false) {
    // Note: This is also run for non-IPO amendable functions.
    assert(ImpliedAttributeKind == Attribute::MustProgress);
    return A.hasAttr(IRP, {Attribute::MustProgress, Attribute::WillReturn},
                     IgnoreSubsumingPositions, Attribute::MustProgress);
  }

  /// Returns true if "mustprogress" is assumed.
  bool isAssumedMustProgress() const { return getAssumed(); }

  /// Returns true if "mustprogress" is known.
  bool isKnownMustProgress() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAMustProgress &createForPosition(const IRPosition &IRP,
                                           Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAMustProgress"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAMustProgress
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3517 
/// An abstract interface for all nonnull attributes.
struct AANonNull
    : public IRAttribute<Attribute::NonNull,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANonNull> {
  AANonNull(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::hasTrivialInitializer.
  /// Nonnull requires actual deduction work, so initialization is not trivial.
  static bool hasTrivialInitializer() { return false; }

  /// See IRAttribute::isImpliedByUndef.
  /// Undef is not necessarily nonnull as nonnull + noundef would cause poison.
  /// Poison implies nonnull though.
  static bool isImpliedByUndef() { return false; }

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Nonnull only makes sense for pointer (or pointer vector) values.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::isImpliedByIR(...).
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false);

  /// Return true if we assume that the underlying value is nonnull.
  bool isAssumedNonNull() const { return getAssumed(); }

  /// Return true if we know that underlying value is nonnull.
  bool isKnownNonNull() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANonNull &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANonNull"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANonNull
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3568 
/// An abstract attribute for the function attribute `norecurse`.
struct AANoRecurse
    : public IRAttribute<Attribute::NoRecurse,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoRecurse> {
  AANoRecurse(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if "norecurse" is assumed.
  bool isAssumedNoRecurse() const { return getAssumed(); }

  /// Return true if "norecurse" is known.
  bool isKnownNoRecurse() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoRecurse &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoRecurse"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoRecurse
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3599 
/// An abstract attribute for the function attribute `willreturn`.
struct AAWillReturn
    : public IRAttribute<Attribute::WillReturn,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AAWillReturn> {
  AAWillReturn(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See IRAttribute::isImpliedByIR. In addition to an explicit `willreturn`
  /// attribute, `mustprogress` + read-only memory effects imply `willreturn`
  /// (a read-only function cannot make progress via side effects, so it must
  /// eventually return); in that case the attribute is manifested directly.
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false) {
    // Note: This is also run for non-IPO amendable functions.
    assert(ImpliedAttributeKind == Attribute::WillReturn);
    if (IRAttribute::isImpliedByIR(A, IRP, ImpliedAttributeKind,
                                   IgnoreSubsumingPositions))
      return true;
    if (!isImpliedByMustprogressAndReadonly(A, IRP))
      return false;
    // The implication holds; make it explicit in the IR.
    A.manifestAttrs(IRP, Attribute::get(IRP.getAnchorValue().getContext(),
                                        Attribute::WillReturn));
    return true;
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  static bool isImpliedByMustprogressAndReadonly(Attributor &A,
                                                 const IRPosition &IRP) {
    // Check for `mustprogress` in the scope and the associated function which
    // might be different if this is a call site.
    if (!A.hasAttr(IRP, {Attribute::MustProgress}))
      return false;

    SmallVector<Attribute, 2> Attrs;
    A.getAttrs(IRP, {Attribute::Memory}, Attrs,
               /* IgnoreSubsumingPositions */ false);

    // Intersect all `memory(...)` attributes found; the result must allow
    // at most reads for the implication to hold.
    MemoryEffects ME = MemoryEffects::unknown();
    for (const Attribute &Attr : Attrs)
      ME &= Attr.getMemoryEffects();
    return ME.onlyReadsMemory();
  }

  /// Return true if "willreturn" is assumed.
  bool isAssumedWillReturn() const { return getAssumed(); }

  /// Return true if "willreturn" is known.
  bool isKnownWillReturn() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAWillReturn &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAWillReturn"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAWillReturn
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3663 
/// An abstract attribute for undefined behavior.
struct AAUndefinedBehavior
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAUndefinedBehavior(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Return true if "undefined behavior" is assumed.
  bool isAssumedToCauseUB() const { return getAssumed(); }

  /// Return true if "undefined behavior" is assumed for a specific instruction.
  virtual bool isAssumedToCauseUB(Instruction *I) const = 0;

  /// Return true if "undefined behavior" is known.
  bool isKnownToCauseUB() const { return getKnown(); }

  /// Return true if "undefined behavior" is known for a specific instruction.
  virtual bool isKnownToCauseUB(Instruction *I) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAUndefinedBehavior &createForPosition(const IRPosition &IRP,
                                                Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAUndefinedBehavior"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAUndefinedBehavior
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3701 
/// An abstract interface to determine reachability of point A to B
/// within a single function.
struct AAIntraFnReachability
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAIntraFnReachability(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Returns true if the 'From' instruction is assumed to reach the 'To'
  /// instruction. Users should provide two positions they are interested in,
  /// and the class determines (and caches) reachability. Instructions in
  /// \p ExclusionSet, if given, are treated as blocking the path.
  virtual bool isAssumedReachable(
      Attributor &A, const Instruction &From, const Instruction &To,
      const AA::InstExclusionSetTy *ExclusionSet = nullptr) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAIntraFnReachability &createForPosition(const IRPosition &IRP,
                                                  Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAIntraFnReachability"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAIntraFnReachability
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3734 
/// An abstract interface for all noalias attributes.
struct AANoAlias
    : public IRAttribute<Attribute::NoAlias,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoAlias> {
  AANoAlias(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Noalias only makes sense for pointer (or pointer vector) values.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See IRAttribute::isImpliedByIR
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false);

  /// See AbstractAttribute::requiresCalleeForCallBase
  static bool requiresCalleeForCallBase() { return false; }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// Return true if we assume that the underlying value is noalias.
  bool isAssumedNoAlias() const { return getAssumed(); }

  /// Return true if we know that underlying value is noalias.
  bool isKnownNoAlias() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoAlias &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoAlias"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoAlias
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3783 
/// An AbstractAttribute for nofree.
struct AANoFree
    : public IRAttribute<Attribute::NoFree,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoFree> {
  AANoFree(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See IRAttribute::isImpliedByIR. `readnone`/`readonly` imply `nofree`
  /// as freeing memory is a write, hence all three kinds are queried.
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false) {
    // Note: This is also run for non-IPO amendable functions.
    assert(ImpliedAttributeKind == Attribute::NoFree);
    return A.hasAttr(
        IRP, {Attribute::ReadNone, Attribute::ReadOnly, Attribute::NoFree},
        IgnoreSubsumingPositions, Attribute::NoFree);
  }

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Valid for function scopes and for pointer (or pointer vector) values.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.isFunctionScope() &&
        !IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Return true if "nofree" is assumed.
  bool isAssumedNoFree() const { return getAssumed(); }

  /// Return true if "nofree" is known.
  bool isKnownNoFree() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoFree &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoFree"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoFree
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3833 
/// An AbstractAttribute for noreturn.
struct AANoReturn
    : public IRAttribute<Attribute::NoReturn,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoReturn> {
  AANoReturn(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if the underlying object is assumed to never return.
  bool isAssumedNoReturn() const { return getAssumed(); }

  /// Return true if the underlying object is known to never return.
  bool isKnownNoReturn() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoReturn &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoReturn"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoReturn
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
3864 
/// An abstract interface for liveness abstract attribute.
struct AAIsDead
    : public StateWrapper<BitIntegerState<uint8_t, 3, 0>, AbstractAttribute> {
  using Base = StateWrapper<BitIntegerState<uint8_t, 3, 0>, AbstractAttribute>;
  AAIsDead(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// State encoding bits. A set bit in the state means the property holds.
  enum {
    HAS_NO_EFFECT = 1 << 0,
    IS_REMOVABLE = 1 << 1,

    // Dead means both: no observable effect and removable.
    IS_DEAD = HAS_NO_EFFECT | IS_REMOVABLE,
  };
  static_assert(IS_DEAD == getBestState(), "Unexpected BEST_STATE value");

protected:
  /// The query functions are protected such that other attributes need to go
  /// through the Attributor interfaces: `Attributor::isAssumedDead(...)`

  /// Returns true if the underlying value is assumed dead.
  virtual bool isAssumedDead() const = 0;

  /// Returns true if the underlying value is known dead.
  virtual bool isKnownDead() const = 0;

  /// Returns true if \p BB is known dead.
  virtual bool isKnownDead(const BasicBlock *BB) const = 0;

  /// Returns true if \p I is assumed dead.
  virtual bool isAssumedDead(const Instruction *I) const = 0;

  /// Returns true if \p I is known dead.
  virtual bool isKnownDead(const Instruction *I) const = 0;

  /// Return true if the underlying value is a store that is known to be
  /// removable. This is different from dead stores as the removable store
  /// can have an effect on live values, especially loads, but that effect
  /// is propagated which allows us to remove the store in turn.
  virtual bool isRemovableStore() const { return false; }

  /// This method is used to check if at least one instruction in a collection
  /// of instructions is live.
  template <typename T> bool isLiveInstSet(T begin, T end) const {
    for (const auto &I : llvm::make_range(begin, end)) {
      assert(I->getFunction() == getIRPosition().getAssociatedFunction() &&
             "Instruction must be in the same anchor scope function.");

      // One not-assumed-dead instruction suffices for the set to be live.
      if (!isAssumedDead(I))
        return true;
    }

    return false;
  }

public:
  /// Create an abstract attribute view for the position \p IRP.
  static AAIsDead &createForPosition(const IRPosition &IRP, Attributor &A);

  /// Determine if \p F might catch asynchronous exceptions.
  static bool mayCatchAsynchronousExceptions(const Function &F) {
    return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
  }

  /// Returns true if \p BB is assumed dead.
  virtual bool isAssumedDead(const BasicBlock *BB) const = 0;

  /// Return if the edge from \p From BB to \p To BB is assumed dead.
  /// This is specifically useful in AAReachability.
  virtual bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const {
    return false;
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAIsDead"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAIsDead
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;

  // The Attributor is allowed to use the protected liveness queries directly.
  friend struct Attributor;
};
3953 
/// State for dereferenceable attribute
struct DerefState : AbstractState {

  /// Return the best possible representable state.
  static DerefState getBestState() { return DerefState(); }
  static DerefState getBestState(const DerefState &) { return getBestState(); }

  /// Return the worst possible representable state.
  static DerefState getWorstState() {
    DerefState DS;
    DS.indicatePessimisticFixpoint();
    return DS;
  }
  static DerefState getWorstState(const DerefState &) {
    return getWorstState();
  }

  /// State representing the number of dereferenceable bytes.
  IncIntegerState<> DerefBytesState;

  /// Map representing accessed memory offsets and sizes.
  /// A key is Offset and a value is size.
  /// If there is a load/store instruction something like,
  ///   p[offset] = v;
  /// (offset, sizeof(v)) will be inserted to this map.
  /// std::map is used because we want to iterate keys in ascending order.
  std::map<int64_t, uint64_t> AccessedBytesMap;

  /// Helper function to calculate dereferenceable bytes from current known
  /// bytes and accessed bytes.
  ///
  /// ```
  /// int f(int *A){
  ///    *A = 0;
  ///    *(A+2) = 2;
  ///    *(A+1) = 1;
  ///    *(A+10) = 10;
  /// }
  /// ```
  /// In that case, AccessedBytesMap is `{0:4, 4:4, 8:4, 40:4}`.
  /// AccessedBytesMap is std::map so it is iterated in ascending order on
  /// key(Offset). So KnownBytes will be updated like this:
  ///
  /// |Access | KnownBytes
  /// |(0, 4)| 0 -> 4
  /// |(4, 4)| 4 -> 8
  /// |(8, 4)| 8 -> 12
  /// |(40, 4) | 12 (break)
  void computeKnownDerefBytesFromAccessedMap() {
    int64_t KnownBytes = DerefBytesState.getKnown();
    for (auto &Access : AccessedBytesMap) {
      // Stop at the first gap; bytes past a gap are not known contiguous.
      if (KnownBytes < Access.first)
        break;
      KnownBytes = std::max(KnownBytes, Access.first + (int64_t)Access.second);
    }

    DerefBytesState.takeKnownMaximum(KnownBytes);
  }

  /// State representing whether the value is globally dereferenceable.
  BooleanState GlobalState;

  /// See AbstractState::isValidState()
  bool isValidState() const override { return DerefBytesState.isValidState(); }

  /// See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override {
    return !isValidState() ||
           (DerefBytesState.isAtFixpoint() && GlobalState.isAtFixpoint());
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    DerefBytesState.indicateOptimisticFixpoint();
    GlobalState.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    DerefBytesState.indicatePessimisticFixpoint();
    GlobalState.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  /// Update known dereferenceable bytes.
  void takeKnownDerefBytesMaximum(uint64_t Bytes) {
    DerefBytesState.takeKnownMaximum(Bytes);

    // Known bytes might increase.
    computeKnownDerefBytesFromAccessedMap();
  }

  /// Update assumed dereferenceable bytes.
  void takeAssumedDerefBytesMinimum(uint64_t Bytes) {
    DerefBytesState.takeAssumedMinimum(Bytes);
  }

  /// Add accessed bytes to the map.
  void addAccessedBytes(int64_t Offset, uint64_t Size) {
    // Keep the largest access size recorded for this offset.
    uint64_t &AccessedBytes = AccessedBytesMap[Offset];
    AccessedBytes = std::max(AccessedBytes, Size);

    // Known bytes might increase.
    computeKnownDerefBytesFromAccessedMap();
  }

  /// Equality for DerefState.
  bool operator==(const DerefState &R) const {
    return this->DerefBytesState == R.DerefBytesState &&
           this->GlobalState == R.GlobalState;
  }

  /// Inequality for DerefState.
  bool operator!=(const DerefState &R) const { return !(*this == R); }

  /// See IntegerStateBase::operator^=
  DerefState operator^=(const DerefState &R) {
    DerefBytesState ^= R.DerefBytesState;
    GlobalState ^= R.GlobalState;
    return *this;
  }

  /// See IntegerStateBase::operator+=
  DerefState operator+=(const DerefState &R) {
    DerefBytesState += R.DerefBytesState;
    GlobalState += R.GlobalState;
    return *this;
  }

  /// See IntegerStateBase::operator&=
  DerefState operator&=(const DerefState &R) {
    DerefBytesState &= R.DerefBytesState;
    GlobalState &= R.GlobalState;
    return *this;
  }

  /// See IntegerStateBase::operator|=
  DerefState operator|=(const DerefState &R) {
    DerefBytesState |= R.DerefBytesState;
    GlobalState |= R.GlobalState;
    return *this;
  }
};
4096 
/// An abstract interface for all dereferenceable attributes.
struct AADereferenceable
    : public IRAttribute<Attribute::Dereferenceable,
                         StateWrapper<DerefState, AbstractAttribute>,
                         AADereferenceable> {
  AADereferenceable(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Dereferenceability only makes sense for pointer (or pointer vector)
  /// values.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Return true if we assume that underlying value is
  /// dereferenceable(_or_null) globally.
  bool isAssumedGlobal() const { return GlobalState.getAssumed(); }

  /// Return true if we know that underlying value is
  /// dereferenceable(_or_null) globally.
  bool isKnownGlobal() const { return GlobalState.getKnown(); }

  /// Return assumed dereferenceable bytes.
  uint32_t getAssumedDereferenceableBytes() const {
    return DerefBytesState.getAssumed();
  }

  /// Return known dereferenceable bytes.
  uint32_t getKnownDereferenceableBytes() const {
    return DerefBytesState.getKnown();
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AADereferenceable &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AADereferenceable"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AADereferenceable
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
4148 
/// State type for alignment: an increasing integer capped at the maximum
/// representable alignment.
using AAAlignmentStateType =
    IncIntegerState<uint64_t, Value::MaximumAlignment, 1>;
/// An abstract interface for all align attributes.
struct AAAlign
    : public IRAttribute<Attribute::Alignment,
                         StateWrapper<AAAlignmentStateType, AbstractAttribute>,
                         AAAlign> {
  AAAlign(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Alignment only makes sense for pointer (or pointer vector) values.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Return assumed alignment.
  Align getAssumedAlign() const { return Align(getAssumed()); }

  /// Return known alignment.
  Align getKnownAlign() const { return Align(getKnown()); }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAAlign"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAAlign
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AAAlign &createForPosition(const IRPosition &IRP, Attributor &A);

  /// Unique ID (due to the unique address)
  static const char ID;
};
4188 
/// An abstract interface to track if a value leaves its defining function
/// instance.
/// TODO: We should make it a ternary AA tracking uniqueness, and uniqueness
/// wrt. the Attributor analysis separately.
struct AAInstanceInfo : public StateWrapper<BooleanState, AbstractAttribute> {
  AAInstanceInfo(const IRPosition &IRP, Attributor &A)
      : StateWrapper<BooleanState, AbstractAttribute>(IRP) {}

  /// Return true if we know that the underlying value is unique in its scope
  /// wrt. the Attributor analysis. That means it might not be unique but we can
  /// still use pointer equality without risking to represent two instances with
  /// one `llvm::Value`.
  bool isKnownUniqueForAnalysis() const { return isKnown(); }

  /// Return true if we assume that the underlying value is unique in its scope
  /// wrt. the Attributor analysis. That means it might not be unique but we can
  /// still use pointer equality without risking to represent two instances with
  /// one `llvm::Value`.
  bool isAssumedUniqueForAnalysis() const { return isAssumed(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAInstanceInfo &createForPosition(const IRPosition &IRP,
                                           Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAInstanceInfo"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAInstanceInfo
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
4228 
/// An abstract interface for all nocapture attributes.
struct AANoCapture
    : public IRAttribute<
          Attribute::NoCapture,
          StateWrapper<BitIntegerState<uint16_t, 7, 0>, AbstractAttribute>,
          AANoCapture> {
  AANoCapture(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See IRAttribute::isImpliedByIR
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false);

  /// Update \p State according to the capture capabilities of \p F for position
  /// \p IRP.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State);

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Capture tracking only makes sense for pointer (or pointer vector) values.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// State encoding bits. A set bit in the state means the property holds.
  /// NO_CAPTURE is the best possible state, 0 the worst possible state.
  enum {
    NOT_CAPTURED_IN_MEM = 1 << 0,
    NOT_CAPTURED_IN_INT = 1 << 1,
    NOT_CAPTURED_IN_RET = 1 << 2,

    /// If we do not capture the value in memory or through integers we can only
    /// communicate it back as a derived pointer.
    NO_CAPTURE_MAYBE_RETURNED = NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT,

    /// If we do not capture the value in memory, through integers, or as a
    /// derived pointer we know it is not captured.
    NO_CAPTURE =
        NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT | NOT_CAPTURED_IN_RET,
  };

  /// Return true if we know that the underlying value is not captured in its
  /// respective scope.
  bool isKnownNoCapture() const { return isKnown(NO_CAPTURE); }

  /// Return true if we assume that the underlying value is not captured in its
  /// respective scope.
  bool isAssumedNoCapture() const { return isAssumed(NO_CAPTURE); }

  /// Return true if we know that the underlying value is not captured in its
  /// respective scope but we allow it to escape through a "return".
  bool isKnownNoCaptureMaybeReturned() const {
    return isKnown(NO_CAPTURE_MAYBE_RETURNED);
  }

  /// Return true if we assume that the underlying value is not captured in its
  /// respective scope but we allow it to escape through a "return".
  bool isAssumedNoCaptureMaybeReturned() const {
    return isAssumed(NO_CAPTURE_MAYBE_RETURNED);
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoCapture &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoCapture"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoCapture
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
4309 
/// State for value simplification: pairs a BooleanState (tracking validity
/// and fixpoint) with an optional simplified value for an associated value
/// of type \p Ty. Used as the state of AAValueSimplify below.
struct ValueSimplifyStateType : public AbstractState {

  ValueSimplifyStateType(Type *Ty) : Ty(Ty) {}

  /// Return the best possible representable state (nothing simplified yet,
  /// everything still assumed possible).
  static ValueSimplifyStateType getBestState(Type *Ty) {
    return ValueSimplifyStateType(Ty);
  }
  static ValueSimplifyStateType getBestState(const ValueSimplifyStateType &VS) {
    return getBestState(VS.Ty);
  }

  /// Return the worst possible representable state.
  static ValueSimplifyStateType getWorstState(Type *Ty) {
    ValueSimplifyStateType DS(Ty);
    DS.indicatePessimisticFixpoint();
    return DS;
  }
  static ValueSimplifyStateType
  getWorstState(const ValueSimplifyStateType &VS) {
    return getWorstState(VS.Ty);
  }

  /// See AbstractState::isValidState(...)
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint(...)
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// Return the assumed state encoding.
  ValueSimplifyStateType getAssumed() { return *this; }
  const ValueSimplifyStateType &getAssumed() const { return *this; }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    return BS.indicatePessimisticFixpoint();
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    return BS.indicateOptimisticFixpoint();
  }

  /// "Clamp" this state with \p PVS.
  ValueSimplifyStateType operator^=(const ValueSimplifyStateType &VS) {
    BS ^= VS.BS;
    unionAssumed(VS.SimplifiedAssociatedValue);
    return *this;
  }

  /// Two states are equal if both are invalid, or both are valid and agree on
  /// the simplified value.
  bool operator==(const ValueSimplifyStateType &RHS) const {
    if (isValidState() != RHS.isValidState())
      return false;
    if (!isValidState() && !RHS.isValidState())
      return true;
    return SimplifiedAssociatedValue == RHS.SimplifiedAssociatedValue;
  }

protected:
  /// The type of the original value.
  Type *Ty;

  /// Merge \p Other into the currently assumed simplified value
  bool unionAssumed(std::optional<Value *> Other);

  /// Helper to track validity and fixpoint
  BooleanState BS;

  /// An assumed simplified value. Initially, it is set to std::nullopt, which
  /// means that the value is not clear under current assumption. If in the
  /// pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  /// returns the original associated value.
  std::optional<Value *> SimplifiedAssociatedValue;
};
4383 
/// An abstract interface for the value simplification abstract attribute.
///
/// The state is a ValueSimplifyStateType initialized with the associated
/// value's type. Clients should not query this attribute directly but use
/// `Attributor::getAssumedSimplified` instead (see the private virtual
/// below).
struct AAValueSimplify
    : public StateWrapper<ValueSimplifyStateType, AbstractAttribute, Type *> {
  using Base = StateWrapper<ValueSimplifyStateType, AbstractAttribute, Type *>;
  AAValueSimplify(const IRPosition &IRP, Attributor &A)
      : Base(IRP, IRP.getAssociatedType()) {}

  /// Create an abstract attribute view for the position \p IRP.
  static AAValueSimplify &createForPosition(const IRPosition &IRP,
                                            Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAValueSimplify"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAValueSimplify
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;

private:
  /// Return an assumed simplified value if a single candidate is found. If
  /// there cannot be one, return original value. If it is not clear yet, return
  /// std::nullopt.
  ///
  /// Use `Attributor::getAssumedSimplified` for value simplification.
  /// (Private on purpose; only the Attributor, as a friend, may call it.)
  virtual std::optional<Value *>
  getAssumedSimplifiedValue(Attributor &A) const = 0;

  friend struct Attributor;
};
4421 
/// An abstract attribute interface for heap-to-stack conversion, i.e.,
/// determining whether heap allocations in the associated scope can be
/// replaced by stack allocations and which matching deallocation calls
/// become removable.
struct AAHeapToStack : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAHeapToStack(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Returns true if HeapToStack conversion is assumed to be possible.
  virtual bool isAssumedHeapToStack(const CallBase &CB) const = 0;

  /// Returns true if HeapToStack conversion is assumed and the CB is a
  /// callsite to a free operation to be removed.
  virtual bool isAssumedHeapToStackRemovedFree(CallBase &CB) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAHeapToStack &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAHeapToStack"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAHeapToStack
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
4450 
/// An abstract interface for privatizability.
///
/// A pointer is privatizable if it can be replaced by a new, private one.
/// Privatizing pointer reduces the use count, interaction between unrelated
/// code parts.
///
/// In order for a pointer to be privatizable its value cannot be observed
/// (=nocapture), it is (for now) not written (=readonly & noalias), we know
/// what values are necessary to make the private copy look like the original
/// one, and the values we need can be loaded (=dereferenceable).
struct AAPrivatizablePtr
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAPrivatizablePtr(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Only pointer (or pointer-vector) typed positions can be privatized.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Returns true if pointer privatization is assumed to be possible.
  bool isAssumedPrivatizablePtr() const { return getAssumed(); }

  /// Returns true if pointer privatization is known to be possible.
  bool isKnownPrivatizablePtr() const { return getKnown(); }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// Return the type we can choose for a private copy of the underlying
  /// value. std::nullopt means it is not clear yet, nullptr means there is
  /// none.
  virtual std::optional<Type *> getPrivatizableType() const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAPrivatizablePtr &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAPrivatizablePtr"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAPrivatizablePtr
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
4506 
/// An abstract interface for memory access kind related attributes
/// (readnone/readonly/writeonly).
struct AAMemoryBehavior
    : public IRAttribute<
          Attribute::ReadNone,
          StateWrapper<BitIntegerState<uint8_t, 3>, AbstractAttribute>,
          AAMemoryBehavior> {
  AAMemoryBehavior(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::hasTrivialInitializer.
  static bool hasTrivialInitializer() { return false; }

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Valid for function-scope positions and for pointer (or pointer-vector)
  /// typed values.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.isFunctionScope() &&
        !IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// State encoding bits. A set bit in the state means the property holds.
  /// BEST_STATE is the best possible state, 0 the worst possible state.
  enum {
    NO_READS = 1 << 0,
    NO_WRITES = 1 << 1,
    NO_ACCESSES = NO_READS | NO_WRITES,

    BEST_STATE = NO_ACCESSES,
  };
  static_assert(BEST_STATE == getBestState(), "Unexpected BEST_STATE value");

  /// Return true if we know that the underlying value is not read or accessed
  /// in its respective scope ("readnone").
  bool isKnownReadNone() const { return isKnown(NO_ACCESSES); }

  /// Return true if we assume that the underlying value is not read or accessed
  /// in its respective scope ("readnone").
  bool isAssumedReadNone() const { return isAssumed(NO_ACCESSES); }

  /// Return true if we know that the underlying value is not written in its
  /// respective scope, though it may be read ("readonly").
  bool isKnownReadOnly() const { return isKnown(NO_WRITES); }

  /// Return true if we assume that the underlying value is not written in its
  /// respective scope, though it may be read ("readonly").
  bool isAssumedReadOnly() const { return isAssumed(NO_WRITES); }

  /// Return true if we know that the underlying value is not read in its
  /// respective scope, though it may be written ("writeonly").
  bool isKnownWriteOnly() const { return isKnown(NO_READS); }

  /// Return true if we assume that the underlying value is not read in its
  /// respective scope, though it may be written ("writeonly").
  bool isAssumedWriteOnly() const { return isAssumed(NO_READS); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAMemoryBehavior &createForPosition(const IRPosition &IRP,
                                             Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAMemoryBehavior"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAMemoryBehavior
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
4581 
/// An abstract interface for all memory location attributes
/// (readnone/argmemonly/inaccessiblememonly/inaccessibleorargmemonly).
struct AAMemoryLocation
    : public IRAttribute<
          Attribute::ReadNone,
          StateWrapper<BitIntegerState<uint32_t, 511>, AbstractAttribute>,
          AAMemoryLocation> {
  using MemoryLocationsKind = StateType::base_t;

  AAMemoryLocation(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::hasTrivialInitializer.
  static bool hasTrivialInitializer() { return false; }

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Valid for function-scope positions and for pointer (or pointer-vector)
  /// typed values.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.isFunctionScope() &&
        !IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Encoding of different locations that could be accessed by a memory
  /// access. A set NO_* bit means the corresponding location is known/assumed
  /// to NOT be accessed.
  enum {
    ALL_LOCATIONS = 0,
    NO_LOCAL_MEM = 1 << 0,
    NO_CONST_MEM = 1 << 1,
    NO_GLOBAL_INTERNAL_MEM = 1 << 2,
    NO_GLOBAL_EXTERNAL_MEM = 1 << 3,
    NO_GLOBAL_MEM = NO_GLOBAL_INTERNAL_MEM | NO_GLOBAL_EXTERNAL_MEM,
    NO_ARGUMENT_MEM = 1 << 4,
    NO_INACCESSIBLE_MEM = 1 << 5,
    NO_MALLOCED_MEM = 1 << 6,
    NO_UNKOWN_MEM = 1 << 7, // (sic) the misspelling is part of the identifier
    NO_LOCATIONS = NO_LOCAL_MEM | NO_CONST_MEM | NO_GLOBAL_INTERNAL_MEM |
                   NO_GLOBAL_EXTERNAL_MEM | NO_ARGUMENT_MEM |
                   NO_INACCESSIBLE_MEM | NO_MALLOCED_MEM | NO_UNKOWN_MEM,

    // Helper bit to track if we gave up or not.
    VALID_STATE = NO_LOCATIONS + 1,

    BEST_STATE = NO_LOCATIONS | VALID_STATE,
  };
  static_assert(BEST_STATE == getBestState(), "Unexpected BEST_STATE value");

  /// Return true if we know that the associated function has no observable
  /// accesses.
  bool isKnownReadNone() const { return isKnown(NO_LOCATIONS); }

  /// Return true if we assume that the associated function has no observable
  /// accesses.
  bool isAssumedReadNone() const {
    return isAssumed(NO_LOCATIONS) || isAssumedStackOnly();
  }

  /// Return true if we know that the associated function has at most
  /// local/stack accesses.
  bool isKnowStackOnly() const {
    return isKnown(inverseLocation(NO_LOCAL_MEM, true, true));
  }

  /// Return true if we assume that the associated function has at most
  /// local/stack accesses.
  bool isAssumedStackOnly() const {
    return isAssumed(inverseLocation(NO_LOCAL_MEM, true, true));
  }

  /// Return true if we know that the underlying value will only access
  /// inaccessible memory only (see Attribute::InaccessibleMemOnly).
  bool isKnownInaccessibleMemOnly() const {
    return isKnown(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
  }

  /// Return true if we assume that the underlying value will only access
  /// inaccessible memory only (see Attribute::InaccessibleMemOnly).
  bool isAssumedInaccessibleMemOnly() const {
    return isAssumed(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
  }

  /// Return true if we know that the underlying value will only access
  /// argument pointees (see Attribute::ArgMemOnly).
  bool isKnownArgMemOnly() const {
    return isKnown(inverseLocation(NO_ARGUMENT_MEM, true, true));
  }

  /// Return true if we assume that the underlying value will only access
  /// argument pointees (see Attribute::ArgMemOnly).
  bool isAssumedArgMemOnly() const {
    return isAssumed(inverseLocation(NO_ARGUMENT_MEM, true, true));
  }

  /// Return true if we know that the underlying value will only access
  /// inaccessible memory or argument pointees (see
  /// Attribute::InaccessibleOrArgMemOnly).
  bool isKnownInaccessibleOrArgMemOnly() const {
    return isKnown(
        inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
  }

  /// Return true if we assume that the underlying value will only access
  /// inaccessible memory or argument pointees (see
  /// Attribute::InaccessibleOrArgMemOnly).
  bool isAssumedInaccessibleOrArgMemOnly() const {
    return isAssumed(
        inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
  }

  /// Return true if the underlying value may access memory through argument
  /// pointers of the associated function, if any.
  bool mayAccessArgMem() const { return !isAssumed(NO_ARGUMENT_MEM); }

  /// Return true if only the memory locations specified by \p MLK are assumed
  /// to be accessed by the associated function.
  bool isAssumedSpecifiedMemOnly(MemoryLocationsKind MLK) const {
    return isAssumed(MLK);
  }

  /// Return the locations that are assumed to be not accessed by the associated
  /// function, if any.
  MemoryLocationsKind getAssumedNotAccessedLocation() const {
    return getAssumed();
  }

  /// Return the inverse of location \p Loc, thus for NO_XXX the return
  /// describes ONLY_XXX. The flags \p AndLocalMem and \p AndConstMem determine
  /// if local (=stack) and constant memory are allowed as well. Most of the
  /// time we do want them to be included, e.g., argmemonly allows accesses via
  /// argument pointers or local or constant memory accesses.
  static MemoryLocationsKind
  inverseLocation(MemoryLocationsKind Loc, bool AndLocalMem, bool AndConstMem) {
    return NO_LOCATIONS & ~(Loc | (AndLocalMem ? NO_LOCAL_MEM : 0) |
                            (AndConstMem ? NO_CONST_MEM : 0));
  };

  /// Return the locations encoded by \p MLK as a readable string.
  static std::string getMemoryLocationsAsStr(MemoryLocationsKind MLK);

  /// Simple enum to distinguish read/write/read-write accesses.
  enum AccessKind {
    NONE = 0,
    READ = 1 << 0,
    WRITE = 1 << 1,
    READ_WRITE = READ | WRITE,
  };

  /// Check \p Pred on all accesses to the memory kinds specified by \p MLK.
  ///
  /// This method will evaluate \p Pred on all accesses (access instruction +
  /// underlying accessed memory pointer) and it will return true if \p Pred
  /// holds every time.
  virtual bool checkForAllAccessesToMemoryKind(
      function_ref<bool(const Instruction *, const Value *, AccessKind,
                        MemoryLocationsKind)>
          Pred,
      MemoryLocationsKind MLK) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAMemoryLocation &createForPosition(const IRPosition &IRP,
                                             Attributor &A);

  /// See AbstractState::getAsStr(Attributor).
  const std::string getAsStr(Attributor *A) const override {
    return getMemoryLocationsAsStr(getAssumedNotAccessedLocation());
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAMemoryLocation"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAMemoryLocation
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
4763 
/// An abstract interface for range value analysis.
///
/// The state is an IntegerRangeState sized to the bit width of the associated
/// (integer-typed) value.
struct AAValueConstantRange
    : public StateWrapper<IntegerRangeState, AbstractAttribute, uint32_t> {
  using Base = StateWrapper<IntegerRangeState, AbstractAttribute, uint32_t>;
  AAValueConstantRange(const IRPosition &IRP, Attributor &A)
      : Base(IRP, IRP.getAssociatedType()->getIntegerBitWidth()) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Only integer typed positions have a constant range.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isIntegerTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// See AbstractAttribute::getState(...).
  IntegerRangeState &getState() override { return *this; }
  const IntegerRangeState &getState() const override { return *this; }

  /// Create an abstract attribute view for the position \p IRP.
  static AAValueConstantRange &createForPosition(const IRPosition &IRP,
                                                 Attributor &A);

  /// Return an assumed range for the associated value at a program point
  /// \p CtxI. If \p CtxI is nullptr, simply return an assumed range.
  virtual ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const = 0;

  /// Return a known range for the associated value at a program point \p CtxI.
  /// If \p CtxI is nullptr, simply return a known range.
  virtual ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const = 0;

  /// Return an assumed constant for the associated value at a program point
  /// \p CtxI. Returns std::nullopt for an empty (unreachable) range and
  /// nullptr if the range does not collapse to a single constant.
  std::optional<Constant *>
  getAssumedConstant(Attributor &A, const Instruction *CtxI = nullptr) const {
    ConstantRange RangeV = getAssumedConstantRange(A, CtxI);
    if (auto *C = RangeV.getSingleElement()) {
      Type *Ty = getAssociatedValue().getType();
      return cast_or_null<Constant>(
          AA::getWithType(*ConstantInt::get(Ty->getContext(), *C), *Ty));
    }
    if (RangeV.isEmptySet())
      return std::nullopt;
    return nullptr;
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAValueConstantRange"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAValueConstantRange
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
4831 
4832 /// A class for a set state.
4833 /// The assumed boolean state indicates whether the corresponding set is full
4834 /// set or not. If the assumed state is false, this is the worst state. The
4835 /// worst state (invalid state) of set of potential values is when the set
4836 /// contains every possible value (i.e. we cannot in any way limit the value
4837 /// that the target position can take). That never happens naturally, we only
4838 /// force it. As for the conditions under which we force it, see
4839 /// AAPotentialConstantValues.
4840 template <typename MemberTy> struct PotentialValuesState : AbstractState {
4841   using SetTy = SmallSetVector<MemberTy, 8>;
4842 
4843   PotentialValuesState() : IsValidState(true), UndefIsContained(false) {}
4844 
4845   PotentialValuesState(bool IsValid)
4846       : IsValidState(IsValid), UndefIsContained(false) {}
4847 
4848   /// See AbstractState::isValidState(...)
4849   bool isValidState() const override { return IsValidState.isValidState(); }
4850 
4851   /// See AbstractState::isAtFixpoint(...)
4852   bool isAtFixpoint() const override { return IsValidState.isAtFixpoint(); }
4853 
4854   /// See AbstractState::indicatePessimisticFixpoint(...)
4855   ChangeStatus indicatePessimisticFixpoint() override {
4856     return IsValidState.indicatePessimisticFixpoint();
4857   }
4858 
4859   /// See AbstractState::indicateOptimisticFixpoint(...)
4860   ChangeStatus indicateOptimisticFixpoint() override {
4861     return IsValidState.indicateOptimisticFixpoint();
4862   }
4863 
4864   /// Return the assumed state
4865   PotentialValuesState &getAssumed() { return *this; }
4866   const PotentialValuesState &getAssumed() const { return *this; }
4867 
4868   /// Return this set. We should check whether this set is valid or not by
4869   /// isValidState() before calling this function.
4870   const SetTy &getAssumedSet() const {
4871     assert(isValidState() && "This set shoud not be used when it is invalid!");
4872     return Set;
4873   }
4874 
4875   /// Returns whether this state contains an undef value or not.
4876   bool undefIsContained() const {
4877     assert(isValidState() && "This flag shoud not be used when it is invalid!");
4878     return UndefIsContained;
4879   }
4880 
4881   bool operator==(const PotentialValuesState &RHS) const {
4882     if (isValidState() != RHS.isValidState())
4883       return false;
4884     if (!isValidState() && !RHS.isValidState())
4885       return true;
4886     if (undefIsContained() != RHS.undefIsContained())
4887       return false;
4888     return Set == RHS.getAssumedSet();
4889   }
4890 
4891   /// Maximum number of potential values to be tracked.
4892   /// This is set by -attributor-max-potential-values command line option
4893   static unsigned MaxPotentialValues;
4894 
4895   /// Return empty set as the best state of potential values.
4896   static PotentialValuesState getBestState() {
4897     return PotentialValuesState(true);
4898   }
4899 
4900   static PotentialValuesState getBestState(const PotentialValuesState &PVS) {
4901     return getBestState();
4902   }
4903 
4904   /// Return full set as the worst state of potential values.
4905   static PotentialValuesState getWorstState() {
4906     return PotentialValuesState(false);
4907   }
4908 
4909   /// Union assumed set with the passed value.
4910   void unionAssumed(const MemberTy &C) { insert(C); }
4911 
4912   /// Union assumed set with assumed set of the passed state \p PVS.
4913   void unionAssumed(const PotentialValuesState &PVS) { unionWith(PVS); }
4914 
4915   /// Union assumed set with an undef value.
4916   void unionAssumedWithUndef() { unionWithUndef(); }
4917 
4918   /// "Clamp" this state with \p PVS.
4919   PotentialValuesState operator^=(const PotentialValuesState &PVS) {
4920     IsValidState ^= PVS.IsValidState;
4921     unionAssumed(PVS);
4922     return *this;
4923   }
4924 
4925   PotentialValuesState operator&=(const PotentialValuesState &PVS) {
4926     IsValidState &= PVS.IsValidState;
4927     unionAssumed(PVS);
4928     return *this;
4929   }
4930 
4931   bool contains(const MemberTy &V) const {
4932     return !isValidState() ? true : Set.contains(V);
4933   }
4934 
4935 protected:
4936   SetTy &getAssumedSet() {
4937     assert(isValidState() && "This set shoud not be used when it is invalid!");
4938     return Set;
4939   }
4940 
4941 private:
4942   /// Check the size of this set, and invalidate when the size is no
4943   /// less than \p MaxPotentialValues threshold.
4944   void checkAndInvalidate() {
4945     if (Set.size() >= MaxPotentialValues)
4946       indicatePessimisticFixpoint();
4947     else
4948       reduceUndefValue();
4949   }
4950 
4951   /// If this state contains both undef and not undef, we can reduce
4952   /// undef to the not undef value.
4953   void reduceUndefValue() { UndefIsContained = UndefIsContained & Set.empty(); }
4954 
4955   /// Insert an element into this set.
4956   void insert(const MemberTy &C) {
4957     if (!isValidState())
4958       return;
4959     Set.insert(C);
4960     checkAndInvalidate();
4961   }
4962 
4963   /// Take union with R.
4964   void unionWith(const PotentialValuesState &R) {
4965     /// If this is a full set, do nothing.
4966     if (!isValidState())
4967       return;
4968     /// If R is full set, change L to a full set.
4969     if (!R.isValidState()) {
4970       indicatePessimisticFixpoint();
4971       return;
4972     }
4973     for (const MemberTy &C : R.Set)
4974       Set.insert(C);
4975     UndefIsContained |= R.undefIsContained();
4976     checkAndInvalidate();
4977   }
4978 
4979   /// Take union with an undef value.
4980   void unionWithUndef() {
4981     UndefIsContained = true;
4982     reduceUndefValue();
4983   }
4984 
4985   /// Take intersection with R.
4986   void intersectWith(const PotentialValuesState &R) {
4987     /// If R is a full set, do nothing.
4988     if (!R.isValidState())
4989       return;
4990     /// If this is a full set, change this to R.
4991     if (!isValidState()) {
4992       *this = R;
4993       return;
4994     }
4995     SetTy IntersectSet;
4996     for (const MemberTy &C : Set) {
4997       if (R.Set.count(C))
4998         IntersectSet.insert(C);
4999     }
5000     Set = IntersectSet;
5001     UndefIsContained &= R.undefIsContained();
5002     reduceUndefValue();
5003   }
5004 
5005   /// A helper state which indicate whether this state is valid or not.
5006   BooleanState IsValidState;
5007 
5008   /// Container for potential values
5009   SetTy Set;
5010 
5011   /// Flag for undef value
5012   bool UndefIsContained;
5013 };
5014 
5015 using PotentialConstantIntValuesState = PotentialValuesState<APInt>;
5016 using PotentialLLVMValuesState =
5017     PotentialValuesState<std::pair<AA::ValueAndContext, AA::ValueScope>>;
5018 
5019 raw_ostream &operator<<(raw_ostream &OS,
5020                         const PotentialConstantIntValuesState &R);
5021 raw_ostream &operator<<(raw_ostream &OS, const PotentialLLVMValuesState &R);
5022 
5023 /// An abstract interface for potential values analysis.
5024 ///
5025 /// This AA collects potential values for each IR position.
5026 /// An assumed set of potential values is initialized with the empty set (the
5027 /// best state) and it will grow monotonically as we find more potential values
5028 /// for this position.
5029 /// The set might be forced to the worst state, that is, to contain every
5030 /// possible value for this position in 2 cases.
5031 ///   1. We surpassed the \p MaxPotentialValues threshold. This includes the
5032 ///      case that this position is affected (e.g. because of an operation) by a
5033 ///      Value that is in the worst state.
5034 ///   2. We tried to initialize on a Value that we cannot handle (e.g. an
5035 ///      operator we do not currently handle).
5036 ///
/// For non-constant integers see AAPotentialValues.
5038 struct AAPotentialConstantValues
5039     : public StateWrapper<PotentialConstantIntValuesState, AbstractAttribute> {
5040   using Base = StateWrapper<PotentialConstantIntValuesState, AbstractAttribute>;
5041   AAPotentialConstantValues(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
5042 
5043   /// See AbstractAttribute::isValidIRPositionForInit
5044   static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
5045     if (!IRP.getAssociatedType()->isIntegerTy())
5046       return false;
5047     return AbstractAttribute::isValidIRPositionForInit(A, IRP);
5048   }
5049 
5050   /// See AbstractAttribute::requiresCallersForArgOrFunction
5051   static bool requiresCallersForArgOrFunction() { return true; }
5052 
5053   /// See AbstractAttribute::getState(...).
5054   PotentialConstantIntValuesState &getState() override { return *this; }
5055   const PotentialConstantIntValuesState &getState() const override {
5056     return *this;
5057   }
5058 
5059   /// Create an abstract attribute view for the position \p IRP.
5060   static AAPotentialConstantValues &createForPosition(const IRPosition &IRP,
5061                                                       Attributor &A);
5062 
5063   /// Return assumed constant for the associated value
5064   std::optional<Constant *>
5065   getAssumedConstant(Attributor &A, const Instruction *CtxI = nullptr) const {
5066     if (!isValidState())
5067       return nullptr;
5068     if (getAssumedSet().size() == 1) {
5069       Type *Ty = getAssociatedValue().getType();
5070       return cast_or_null<Constant>(AA::getWithType(
5071           *ConstantInt::get(Ty->getContext(), *(getAssumedSet().begin())),
5072           *Ty));
5073     }
5074     if (getAssumedSet().size() == 0) {
5075       if (undefIsContained())
5076         return UndefValue::get(getAssociatedValue().getType());
5077       return std::nullopt;
5078     }
5079 
5080     return nullptr;
5081   }
5082 
5083   /// See AbstractAttribute::getName()
5084   const std::string getName() const override {
5085     return "AAPotentialConstantValues";
5086   }
5087 
5088   /// See AbstractAttribute::getIdAddr()
5089   const char *getIdAddr() const override { return &ID; }
5090 
5091   /// This function should return true if the type of the \p AA is
5092   /// AAPotentialConstantValues
5093   static bool classof(const AbstractAttribute *AA) {
5094     return (AA->getIdAddr() == &ID);
5095   }
5096 
5097   /// Unique ID (due to the unique address)
5098   static const char ID;
5099 };
5100 
/// An abstract interface collecting potential (general LLVM-IR) values for an
/// IR position. For constant integers see AAPotentialConstantValues.
struct AAPotentialValues
    : public StateWrapper<PotentialLLVMValuesState, AbstractAttribute> {
  using Base = StateWrapper<PotentialLLVMValuesState, AbstractAttribute>;
  AAPotentialValues(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// See AbstractAttribute::getState(...).
  PotentialLLVMValuesState &getState() override { return *this; }
  const PotentialLLVMValuesState &getState() const override { return *this; }

  /// Create an abstract attribute view for the position \p IRP.
  static AAPotentialValues &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// Extract the single value in \p Values if any.
  static Value *getSingleValue(Attributor &A, const AbstractAttribute &AA,
                               const IRPosition &IRP,
                               SmallVectorImpl<AA::ValueAndContext> &Values);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAPotentialValues"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAPotentialValues
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;

private:
  /// Populate \p Values with simplified values for this position in scope
  /// \p ValueScope. Private on purpose: only the Attributor (declared a
  /// friend below) is meant to call this directly.
  virtual bool getAssumedSimplifiedValues(
      Attributor &A, SmallVectorImpl<AA::ValueAndContext> &Values,
      AA::ValueScope, bool RecurseForSelectAndPHI = false) const = 0;

  friend struct Attributor;
};
5144 
5145 /// An abstract interface for all noundef attributes.
/// An abstract interface for all noundef attributes.
struct AANoUndef
    : public IRAttribute<Attribute::NoUndef,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoUndef> {
  AANoUndef(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See IRAttribute::isImpliedByUndef
  /// `noundef` is not implied by an undef value (returns false).
  static bool isImpliedByUndef() { return false; }

  /// See IRAttribute::isImpliedByPoison
  /// `noundef` is not implied by a poison value (returns false).
  static bool isImpliedByPoison() { return false; }

  /// See IRAttribute::isImpliedByIR
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false);

  /// Return true if we assume that the underlying value is noundef.
  bool isAssumedNoUndef() const { return getAssumed(); }

  /// Return true if we know that underlying value is noundef.
  bool isKnownNoUndef() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoUndef &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoUndef"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoUndef
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
5186 
5187 struct AANoFPClass
5188     : public IRAttribute<
5189           Attribute::NoFPClass,
5190           StateWrapper<BitIntegerState<uint32_t, fcAllFlags, fcNone>,
5191                        AbstractAttribute>,
5192           AANoFPClass> {
5193   using Base = StateWrapper<BitIntegerState<uint32_t, fcAllFlags, fcNone>,
5194                             AbstractAttribute>;
5195 
5196   AANoFPClass(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
5197 
5198   /// See AbstractAttribute::isValidIRPositionForInit
5199   static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
5200     Type *Ty = IRP.getAssociatedType();
5201     do {
5202       if (Ty->isFPOrFPVectorTy())
5203         return IRAttribute::isValidIRPositionForInit(A, IRP);
5204       if (!Ty->isArrayTy())
5205         break;
5206       Ty = Ty->getArrayElementType();
5207     } while (true);
5208     return false;
5209   }
5210 
5211   /// Return true if we assume that the underlying value is nofpclass.
5212   FPClassTest getAssumedNoFPClass() const {
5213     return static_cast<FPClassTest>(getAssumed());
5214   }
5215 
5216   /// Create an abstract attribute view for the position \p IRP.
5217   static AANoFPClass &createForPosition(const IRPosition &IRP, Attributor &A);
5218 
5219   /// See AbstractAttribute::getName()
5220   const std::string getName() const override { return "AANoFPClass"; }
5221 
5222   /// See AbstractAttribute::getIdAddr()
5223   const char *getIdAddr() const override { return &ID; }
5224 
5225   /// This function should return true if the type of the \p AA is AANoFPClass
5226   static bool classof(const AbstractAttribute *AA) {
5227     return (AA->getIdAddr() == &ID);
5228   }
5229 
5230   /// Unique ID (due to the unique address)
5231   static const char ID;
5232 };
5233 
5234 struct AACallGraphNode;
5235 struct AACallEdges;
5236 
/// An iterator for call edges that creates AACallEdges attributes lazily.
/// This iterator becomes invalid if the underlying edge list changes.
/// It therefore shouldn't outlive an iteration of the Attributor.
class AACallEdgeIterator
    : public iterator_adaptor_base<AACallEdgeIterator,
                                   SetVector<Function *>::iterator> {
  // Private: instances are only created by the friends declared below.
  AACallEdgeIterator(Attributor &A, SetVector<Function *>::iterator Begin)
      : iterator_adaptor_base(Begin), A(A) {}

public:
  /// Dereference to the call graph node of the current function (created
  /// lazily, see the class comment).
  AACallGraphNode *operator*() const;

private:
  /// Attributor used to materialize nodes on demand.
  Attributor &A;
  friend AACallEdges;
  friend AttributorCallGraph;
};
5254 
5255 struct AACallGraphNode {
5256   AACallGraphNode(Attributor &A) : A(A) {}
5257   virtual ~AACallGraphNode() = default;
5258 
5259   virtual AACallEdgeIterator optimisticEdgesBegin() const = 0;
5260   virtual AACallEdgeIterator optimisticEdgesEnd() const = 0;
5261 
5262   /// Iterator range for exploring the call graph.
5263   iterator_range<AACallEdgeIterator> optimisticEdgesRange() const {
5264     return iterator_range<AACallEdgeIterator>(optimisticEdgesBegin(),
5265                                               optimisticEdgesEnd());
5266   }
5267 
5268 protected:
5269   /// Reference to Attributor needed for GraphTraits implementation.
5270   Attributor &A;
5271 };
5272 
5273 /// An abstract state for querying live call edges.
5274 /// This interface uses the Attributor's optimistic liveness
5275 /// information to compute the edges that are alive.
struct AACallEdges : public StateWrapper<BooleanState, AbstractAttribute>,
                     AACallGraphNode {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;

  AACallEdges(const IRPosition &IRP, Attributor &A)
      : Base(IRP), AACallGraphNode(A) {}

  /// The callee value is tracked beyond a simple stripPointerCasts, so we allow
  /// unknown callees.
  static bool requiresCalleeForCallBase() { return false; }

  /// Get the optimistic edges.
  virtual const SetVector<Function *> &getOptimisticEdges() const = 0;

  /// Is there any call with an unknown callee.
  virtual bool hasUnknownCallee() const = 0;

  /// Is there any call with an unknown callee, excluding any inline asm.
  virtual bool hasNonAsmUnknownCallee() const = 0;

  /// Iterator for exploring the call graph (lazily creates AACallEdges for
  /// the callees, see AACallEdgeIterator).
  AACallEdgeIterator optimisticEdgesBegin() const override {
    return AACallEdgeIterator(A, getOptimisticEdges().begin());
  }

  /// Iterator for exploring the call graph.
  AACallEdgeIterator optimisticEdgesEnd() const override {
    return AACallEdgeIterator(A, getOptimisticEdges().end());
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AACallEdges &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AACallEdges"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AACallEdges.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
5323 
5324 // Synthetic root node for the Attributor's internal call graph.
5325 struct AttributorCallGraph : public AACallGraphNode {
5326   AttributorCallGraph(Attributor &A) : AACallGraphNode(A) {}
5327   virtual ~AttributorCallGraph() = default;
5328 
5329   AACallEdgeIterator optimisticEdgesBegin() const override {
5330     return AACallEdgeIterator(A, A.Functions.begin());
5331   }
5332 
5333   AACallEdgeIterator optimisticEdgesEnd() const override {
5334     return AACallEdgeIterator(A, A.Functions.end());
5335   }
5336 
5337   /// Force populate the entire call graph.
5338   void populateAll() const {
5339     for (const AACallGraphNode *AA : optimisticEdgesRange()) {
5340       // Nothing else to do here.
5341       (void)AA;
5342     }
5343   }
5344 
5345   void print();
5346 };
5347 
/// GraphTraits specialization so LLVM's generic graph algorithms can walk the
/// Attributor call graph node-by-node.
template <> struct GraphTraits<AACallGraphNode *> {
  using NodeRef = AACallGraphNode *;
  using ChildIteratorType = AACallEdgeIterator;

  static AACallEdgeIterator child_begin(AACallGraphNode *Node) {
    return Node->optimisticEdgesBegin();
  }

  static AACallEdgeIterator child_end(AACallGraphNode *Node) {
    return Node->optimisticEdgesEnd();
  }
};
5360 
/// GraphTraits specialization for the whole Attributor call graph; nodes are
/// enumerated from the synthetic root's edges.
template <>
struct GraphTraits<AttributorCallGraph *>
    : public GraphTraits<AACallGraphNode *> {
  using nodes_iterator = AACallEdgeIterator;

  // The synthetic root (the graph object itself) serves as the entry node.
  static AACallGraphNode *getEntryNode(AttributorCallGraph *G) {
    return static_cast<AACallGraphNode *>(G);
  }

  static AACallEdgeIterator nodes_begin(const AttributorCallGraph *G) {
    return G->optimisticEdgesBegin();
  }

  static AACallEdgeIterator nodes_end(const AttributorCallGraph *G) {
    return G->optimisticEdgesEnd();
  }
};
5378 
5379 template <>
5380 struct DOTGraphTraits<AttributorCallGraph *> : public DefaultDOTGraphTraits {
5381   DOTGraphTraits(bool Simple = false) : DefaultDOTGraphTraits(Simple) {}
5382 
5383   std::string getNodeLabel(const AACallGraphNode *Node,
5384                            const AttributorCallGraph *Graph) {
5385     const AACallEdges *AACE = static_cast<const AACallEdges *>(Node);
5386     return AACE->getAssociatedFunction()->getName().str();
5387   }
5388 
5389   static bool isNodeHidden(const AACallGraphNode *Node,
5390                            const AttributorCallGraph *Graph) {
5391     // Hide the synth root.
5392     return static_cast<const AACallGraphNode *>(Graph) == Node;
5393   }
5394 };
5395 
struct AAExecutionDomain
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAExecutionDomain(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Summary about the execution domain of a block or instruction.
  struct ExecutionDomainTy {
    using BarriersSetTy = SmallPtrSet<CallBase *, 2>;
    using AssumesSetTy = SmallPtrSet<AssumeInst *, 4>;

    /// Record the encountered assumption \p AI in this summary.
    void addAssumeInst(Attributor &A, AssumeInst &AI) {
      EncounteredAssumes.insert(&AI);
    }

    /// Record the aligned barrier \p CB in this summary.
    void addAlignedBarrier(Attributor &A, CallBase &CB) {
      AlignedBarriers.insert(&CB);
    }

    /// Drop all recorded assumptions and aligned barriers.
    void clearAssumeInstAndAlignedBarriers() {
      EncounteredAssumes.clear();
      AlignedBarriers.clear();
    }

    // Domain flags, initialized to their optimistic defaults.
    bool IsExecutedByInitialThreadOnly = true;
    bool IsReachedFromAlignedBarrierOnly = true;
    bool IsReachingAlignedBarrierOnly = true;
    bool EncounteredNonLocalSideEffect = false;
    BarriersSetTy AlignedBarriers;
    AssumesSetTy EncounteredAssumes;
  };

  /// Create an abstract attribute view for the position \p IRP.
  static AAExecutionDomain &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName().
  const std::string getName() const override { return "AAExecutionDomain"; }

  /// See AbstractAttribute::getIdAddr().
  const char *getIdAddr() const override { return &ID; }

  /// Check if an instruction is executed only by the initial thread.
  bool isExecutedByInitialThreadOnly(const Instruction &I) const {
    return isExecutedByInitialThreadOnly(*I.getParent());
  }

  /// Check if a basic block is executed only by the initial thread.
  virtual bool isExecutedByInitialThreadOnly(const BasicBlock &) const = 0;

  /// Check if the instruction \p I is executed in an aligned region, that is,
  /// the synchronizing effects before and after \p I are both aligned barriers.
  /// This effectively means all threads execute \p I together.
  virtual bool isExecutedInAlignedRegion(Attributor &A,
                                         const Instruction &I) const = 0;

  /// Return the execution domain of the block \p BB.
  virtual ExecutionDomainTy getExecutionDomain(const BasicBlock &) const = 0;
  /// Return the execution domain with which the call \p CB is entered and the
  /// one with which it is left.
  virtual std::pair<ExecutionDomainTy, ExecutionDomainTy>
  getExecutionDomain(const CallBase &CB) const = 0;
  /// Return the execution domain of the associated function.
  virtual ExecutionDomainTy getFunctionExecutionDomain() const = 0;

  /// Helper function to determine if \p FI is a no-op given the information
  /// about its execution domain.
  virtual bool isNoOpFence(const FenceInst &FI) const = 0;

  /// This function should return true if the type of the \p AA is
  /// AAExecutionDomain.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
5471 
5472 /// An abstract Attribute for computing reachability between functions.
struct AAInterFnReachability
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;

  AAInterFnReachability(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Return true if the function represented by this position can reach \p Fn.
  /// Conservatively answers true when there is no anchor scope or it is only
  /// a declaration (no body to analyze).
  bool canReach(Attributor &A, const Function &Fn) const {
    Function *Scope = getAnchorScope();
    if (!Scope || Scope->isDeclaration())
      return true;
    return instructionCanReach(A, Scope->getEntryBlock().front(), Fn);
  }

  /// Can \p Inst reach \p Fn.
  /// See also AA::isPotentiallyReachable.
  virtual bool instructionCanReach(
      Attributor &A, const Instruction &Inst, const Function &Fn,
      const AA::InstExclusionSetTy *ExclusionSet = nullptr,
      SmallPtrSet<const Function *, 16> *Visited = nullptr) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAInterFnReachability &createForPosition(const IRPosition &IRP,
                                                  Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAInterFnReachability"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAInterFnReachability.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
5512 
5513 /// An abstract Attribute for determining the necessity of the convergent
5514 /// attribute.
struct AANonConvergent : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;

  AANonConvergent(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Create an abstract attribute view for the position \p IRP.
  static AANonConvergent &createForPosition(const IRPosition &IRP,
                                            Attributor &A);

  /// Return true if "non-convergent" is assumed, i.e., we assume this position
  /// does not need the convergent attribute.
  bool isAssumedNotConvergent() const { return getAssumed(); }

  /// Return true if "non-convergent" is known.
  bool isKnownNotConvergent() const { return getKnown(); }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANonConvergent"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AANonConvergent.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
5545 
/// An abstract interface for pointer access information.
5547 struct AAPointerInfo : public AbstractAttribute {
5548   AAPointerInfo(const IRPosition &IRP) : AbstractAttribute(IRP) {}
5549 
  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    // Pointer info is only meaningful for pointer (or vector-of-pointer)
    // positions.
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }
5556 
  /// Bit-encoded kind of an access: one of the may/must bits combined with
  /// the read/write bits (plus a special assumption bit).
  enum AccessKind {
    // First two bits to distinguish may and must accesses.
    AK_MUST = 1 << 0,
    AK_MAY = 1 << 1,

    // Then two bits for read and write. These are not exclusive.
    AK_R = 1 << 2,
    AK_W = 1 << 3,
    AK_RW = AK_R | AK_W,

    // One special case for assumptions about memory content. These
    // are neither reads nor writes. They are however always modeled
    // as read to avoid using them for write removal. Note that the
    // AK_MUST bit is part of this encoding.
    AK_ASSUMPTION = (1 << 4) | AK_MUST,

    // Helper for easy access.
    AK_MAY_READ = AK_MAY | AK_R,
    AK_MAY_WRITE = AK_MAY | AK_W,
    AK_MAY_READ_WRITE = AK_MAY | AK_R | AK_W,
    AK_MUST_READ = AK_MUST | AK_R,
    AK_MUST_WRITE = AK_MUST | AK_W,
    AK_MUST_READ_WRITE = AK_MUST | AK_R | AK_W,
  };
5580 
5581   /// A container for a list of ranges.
5582   struct RangeList {
5583     // The set of ranges rarely contains more than one element, and is unlikely
5584     // to contain more than say four elements. So we find the middle-ground with
5585     // a sorted vector. This avoids hard-coding a rarely used number like "four"
5586     // into every instance of a SmallSet.
5587     using RangeTy = AA::RangeTy;
5588     using VecTy = SmallVector<RangeTy>;
5589     using iterator = VecTy::iterator;
5590     using const_iterator = VecTy::const_iterator;
5591     VecTy Ranges;
5592 
5593     RangeList(const RangeTy &R) { Ranges.push_back(R); }
5594     RangeList(ArrayRef<int64_t> Offsets, int64_t Size) {
5595       Ranges.reserve(Offsets.size());
5596       for (unsigned i = 0, e = Offsets.size(); i != e; ++i) {
5597         assert(((i + 1 == e) || Offsets[i] < Offsets[i + 1]) &&
5598                "Expected strictly ascending offsets.");
5599         Ranges.emplace_back(Offsets[i], Size);
5600       }
5601     }
5602     RangeList() = default;
5603 
5604     iterator begin() { return Ranges.begin(); }
5605     iterator end() { return Ranges.end(); }
5606     const_iterator begin() const { return Ranges.begin(); }
5607     const_iterator end() const { return Ranges.end(); }
5608 
5609     // Helpers required for std::set_difference
5610     using value_type = RangeTy;
5611     void push_back(const RangeTy &R) {
5612       assert((Ranges.empty() || RangeTy::OffsetLessThan(Ranges.back(), R)) &&
5613              "Ensure the last element is the greatest.");
5614       Ranges.push_back(R);
5615     }
5616 
5617     /// Copy ranges from \p L that are not in \p R, into \p D.
5618     static void set_difference(const RangeList &L, const RangeList &R,
5619                                RangeList &D) {
5620       std::set_difference(L.begin(), L.end(), R.begin(), R.end(),
5621                           std::back_inserter(D), RangeTy::OffsetLessThan);
5622     }
5623 
5624     unsigned size() const { return Ranges.size(); }
5625 
5626     bool operator==(const RangeList &OI) const { return Ranges == OI.Ranges; }
5627 
5628     /// Merge the ranges in \p RHS into the current ranges.
5629     /// - Merging a list of  unknown ranges makes the current list unknown.
5630     /// - Ranges with the same offset are merged according to RangeTy::operator&
5631     /// \return true if the current RangeList changed.
5632     bool merge(const RangeList &RHS) {
5633       if (isUnknown())
5634         return false;
5635       if (RHS.isUnknown()) {
5636         setUnknown();
5637         return true;
5638       }
5639 
5640       if (Ranges.empty()) {
5641         Ranges = RHS.Ranges;
5642         return true;
5643       }
5644 
5645       bool Changed = false;
5646       auto LPos = Ranges.begin();
5647       for (auto &R : RHS.Ranges) {
5648         auto Result = insert(LPos, R);
5649         if (isUnknown())
5650           return true;
5651         LPos = Result.first;
5652         Changed |= Result.second;
5653       }
5654       return Changed;
5655     }
5656 
5657     /// Insert \p R at the given iterator \p Pos, and merge if necessary.
5658     ///
5659     /// This assumes that all ranges before \p Pos are OffsetLessThan \p R, and
5660     /// then maintains the sorted order for the suffix list.
5661     ///
5662     /// \return The place of insertion and true iff anything changed.
5663     std::pair<iterator, bool> insert(iterator Pos, const RangeTy &R) {
5664       if (isUnknown())
5665         return std::make_pair(Ranges.begin(), false);
5666       if (R.offsetOrSizeAreUnknown()) {
5667         return std::make_pair(setUnknown(), true);
5668       }
5669 
5670       // Maintain this as a sorted vector of unique entries.
5671       auto LB = std::lower_bound(Pos, Ranges.end(), R, RangeTy::OffsetLessThan);
5672       if (LB == Ranges.end() || LB->Offset != R.Offset)
5673         return std::make_pair(Ranges.insert(LB, R), true);
5674       bool Changed = *LB != R;
5675       *LB &= R;
5676       if (LB->offsetOrSizeAreUnknown())
5677         return std::make_pair(setUnknown(), true);
5678       return std::make_pair(LB, Changed);
5679     }
5680 
5681     /// Insert the given range \p R, maintaining sorted order.
5682     ///
5683     /// \return The place of insertion and true iff anything changed.
5684     std::pair<iterator, bool> insert(const RangeTy &R) {
5685       return insert(Ranges.begin(), R);
5686     }
5687 
5688     /// Add the increment \p Inc to the offset of every range.
5689     void addToAllOffsets(int64_t Inc) {
5690       assert(!isUnassigned() &&
5691              "Cannot increment if the offset is not yet computed!");
5692       if (isUnknown())
5693         return;
5694       for (auto &R : Ranges) {
5695         R.Offset += Inc;
5696       }
5697     }
5698 
5699     /// Return true iff there is exactly one range and it is known.
5700     bool isUnique() const {
5701       return Ranges.size() == 1 && !Ranges.front().offsetOrSizeAreUnknown();
5702     }
5703 
5704     /// Return the unique range, assuming it exists.
5705     const RangeTy &getUnique() const {
5706       assert(isUnique() && "No unique range to return!");
5707       return Ranges.front();
5708     }
5709 
5710     /// Return true iff the list contains an unknown range.
5711     bool isUnknown() const {
5712       if (isUnassigned())
5713         return false;
5714       if (Ranges.front().offsetOrSizeAreUnknown()) {
5715         assert(Ranges.size() == 1 && "Unknown is a singleton range.");
5716         return true;
5717       }
5718       return false;
5719     }
5720 
5721     /// Discard all ranges and insert a single unknown range.
5722     iterator setUnknown() {
5723       Ranges.clear();
5724       Ranges.push_back(RangeTy::getUnknown());
5725       return Ranges.begin();
5726     }
5727 
5728     /// Return true if no ranges have been inserted.
5729     bool isUnassigned() const { return Ranges.size() == 0; }
5730   };
5731 
5732   /// An access description.
5733   struct Access {
    /// Create an access where the local and remote instruction coincide.
    Access(Instruction *I, int64_t Offset, int64_t Size,
           std::optional<Value *> Content, AccessKind Kind, Type *Ty)
        : LocalI(I), RemoteI(I), Content(Content), Ranges(Offset, Size),
          Kind(Kind), Ty(Ty) {
      verify();
    }
    /// Create an access over a list of ranges. With more than one range the
    /// access is demoted from a "must" to a "may" access (a must access has a
    /// unique range, see verify()).
    Access(Instruction *LocalI, Instruction *RemoteI, const RangeList &Ranges,
           std::optional<Value *> Content, AccessKind K, Type *Ty)
        : LocalI(LocalI), RemoteI(RemoteI), Content(Content), Ranges(Ranges),
          Kind(K), Ty(Ty) {
      if (Ranges.size() > 1) {
        Kind = AccessKind(Kind | AK_MAY);
        Kind = AccessKind(Kind & ~AK_MUST);
      }
      verify();
    }
    /// Create an access with distinct local and remote instructions.
    Access(Instruction *LocalI, Instruction *RemoteI, int64_t Offset,
           int64_t Size, std::optional<Value *> Content, AccessKind Kind,
           Type *Ty)
        : LocalI(LocalI), RemoteI(RemoteI), Content(Content),
          Ranges(Offset, Size), Kind(Kind), Ty(Ty) {
      verify();
    }
    Access(const Access &Other) = default;
5758 
    Access &operator=(const Access &Other) = default;
    /// Equality compares instructions, ranges, content, and kind.
    /// Note: the access type \p Ty is not part of the comparison.
    bool operator==(const Access &R) const {
      return LocalI == R.LocalI && RemoteI == R.RemoteI && Ranges == R.Ranges &&
             Content == R.Content && Kind == R.Kind;
    }
    bool operator!=(const Access &R) const { return !(*this == R); }
5765 
    /// Merge \p R into this access. Both accesses must describe the same
    /// local and remote instruction.
    Access &operator&=(const Access &R) {
      assert(RemoteI == R.RemoteI && "Expected same instruction!");
      assert(LocalI == R.LocalI && "Expected same instruction!");

      // Note that every Access object corresponds to a unique Value, and only
      // accesses to the same Value are merged. Hence we assume that all ranges
      // are the same size. If ranges can be different size, then the contents
      // must be dropped.
      Ranges.merge(R.Ranges);
      Content =
          AA::combineOptionalValuesInAAValueLatice(Content, R.Content, Ty);

      // Combine the access kind, which results in a bitwise union.
      // If there is more than one range, then this must be a MAY.
      // If we combine a may and a must access we clear the must bit.
      Kind = AccessKind(Kind | R.Kind);
      if ((Kind & AK_MAY) || Ranges.size() > 1) {
        Kind = AccessKind(Kind | AK_MAY);
        Kind = AccessKind(Kind & ~AK_MUST);
      }
      verify();
      return *this;
    }
5789 
    /// Assert the internal invariants: exactly one of may/must is set, an
    /// access is never both an assumption and a write, and a must access has
    /// at most one range.
    void verify() {
      assert(isMustAccess() + isMayAccess() == 1 &&
             "Expect must or may access, not both.");
      assert(isAssumption() + isWrite() <= 1 &&
             "Expect assumption access or write access, never both.");
      assert((isMayAccess() || Ranges.size() == 1) &&
             "Cannot be a must access if there are multiple ranges.");
    }
5798 
5799     /// Return the access kind.
5800     AccessKind getKind() const { return Kind; }
5801 
5802     /// Return true if this is a read access.
5803     bool isRead() const { return Kind & AK_R; }
5804 
5805     /// Return true if this is a write access.
5806     bool isWrite() const { return Kind & AK_W; }
5807 
5808     /// Return true if this is a write access.
5809     bool isWriteOrAssumption() const { return isWrite() || isAssumption(); }
5810 
5811     /// Return true if this is an assumption access.
5812     bool isAssumption() const { return Kind == AK_ASSUMPTION; }
5813 
    /// Return true if the MUST bit is set. A must access is required to cover
    /// at most a single range (checked by the assertion).
    bool isMustAccess() const {
      bool MustAccess = Kind & AK_MUST;
      assert((!MustAccess || Ranges.size() < 2) &&
             "Cannot be a must access if there are multiple ranges.");
      return MustAccess;
    }

    /// Return true if the MAY bit is set. If it is not set, the access is a
    /// must access and therefore cannot have multiple ranges (checked by the
    /// assertion).
    bool isMayAccess() const {
      bool MayAccess = Kind & AK_MAY;
      assert((MayAccess || Ranges.size() < 2) &&
             "Cannot be a must access if there are multiple ranges.");
      return MayAccess;
    }
5827 
    /// Return the instruction that causes the access with respect to the local
    /// scope of the associated attribute.
    Instruction *getLocalInst() const { return LocalI; }

    /// Return the actual instruction that causes the access.
    Instruction *getRemoteInst() const { return RemoteI; }

    /// Return true if the value written is not known yet (\p Content is still
    /// `std::nullopt`).
    bool isWrittenValueYetUndetermined() const { return !Content; }

    /// Return true if the value written cannot be determined at all (\p
    /// Content holds `nullptr`).
    bool isWrittenValueUnknown() const {
      return Content.has_value() && !*Content;
    }

    /// Set the value written to nullptr, i.e., unknown.
    void setWrittenValueUnknown() { Content = nullptr; }

    /// Return the type associated with the access, if known.
    Type *getType() const { return Ty; }

    /// Return the value written, if any. Must not be called while the value
    /// is still undetermined (checked by the assertion).
    Value *getWrittenValue() const {
      assert(!isWrittenValueYetUndetermined() &&
             "Value needs to be determined before accessing it.");
      return *Content;
    }

    /// Return the written value; `std::nullopt` if it is not yet determined,
    /// `nullptr` if it cannot be determined.
    std::optional<Value *> getContent() const { return Content; }

    /// Return true if this access covers exactly one range.
    bool hasUniqueRange() const { return Ranges.isUnique(); }
    /// Return the single range; only valid if hasUniqueRange() is true.
    const AA::RangeTy &getUniqueRange() const { return Ranges.getUnique(); }
5862 
5863     /// Add a range accessed by this Access.
5864     ///
5865     /// If there are multiple ranges, then this is a "may access".
5866     void addRange(int64_t Offset, int64_t Size) {
5867       Ranges.insert({Offset, Size});
5868       if (!hasUniqueRange()) {
5869         Kind = AccessKind(Kind | AK_MAY);
5870         Kind = AccessKind(Kind & ~AK_MUST);
5871       }
5872     }
5873 
    /// Return the list of (potential) ranges accessed from the base pointer.
    const RangeList &getRanges() const { return Ranges; }

    // Iteration over the accessed ranges.
    using const_iterator = RangeList::const_iterator;
    const_iterator begin() const { return Ranges.begin(); }
    const_iterator end() const { return Ranges.end(); }

  private:
    /// The instruction responsible for the access with respect to the local
    /// scope of the associated attribute.
    Instruction *LocalI;

    /// The instruction responsible for the access.
    Instruction *RemoteI;

    /// The value written, if any. `std::nullopt` means "not known yet",
    /// `nullptr` means the value cannot be determined.
    std::optional<Value *> Content;

    /// Set of potential ranges accessed from the base pointer.
    RangeList Ranges;

    /// The access kind, e.g., READ, as bitset (could be more than one).
    AccessKind Kind;

    /// The type of the content, thus the type read/written, can be null if not
    /// available.
    Type *Ty;
  };
5902 
  /// Create an abstract attribute view for the position \p IRP.
  static AAPointerInfo &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAPointerInfo"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// Call \p CB on all accesses that might interfere with \p Range and return
  /// true if all such accesses were known and the callback returned true for
  /// all of them, false otherwise. An access interferes with an offset-size
  /// pair if it might read or write that memory region.
  virtual bool forallInterferingAccesses(
      AA::RangeTy Range, function_ref<bool(const Access &, bool)> CB) const = 0;

  /// Call \p CB on all accesses that might interfere with \p I and
  /// return true if all such accesses were known and the callback returned true
  /// for all of them, false otherwise. In contrast to forallInterferingAccesses
  /// this function will perform reasoning to exclude write accesses that cannot
  /// affect the load even if they on the surface look as if they would. The
  /// flag \p HasBeenWrittenTo will be set to true if we know that \p I does not
  /// read the initial value of the underlying memory.
  virtual bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      bool FindInterferingWrites, bool FindInterferingReads,
      function_ref<bool(const Access &, bool)> CB, bool &HasBeenWrittenTo,
      AA::RangeTy &Range) const = 0;

  /// This function should return true if the type of the \p AA is AAPointerInfo
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
5939 };
5940 
/// An abstract attribute for getting assumption information.
struct AAAssumptionInfo
    : public StateWrapper<SetState<StringRef>, AbstractAttribute,
                          DenseSet<StringRef>> {
  using Base =
      StateWrapper<SetState<StringRef>, AbstractAttribute, DenseSet<StringRef>>;

  /// Construct the attribute with the initially known assumption set \p Known.
  AAAssumptionInfo(const IRPosition &IRP, Attributor &A,
                   const DenseSet<StringRef> &Known)
      : Base(IRP, Known) {}

  /// Returns true if the assumption set contains the assumption \p Assumption.
  virtual bool hasAssumption(const StringRef Assumption) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAAssumptionInfo &createForPosition(const IRPosition &IRP,
                                             Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAAssumptionInfo"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAAssumptionInfo
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
5974 
/// An abstract attribute for collecting all underlying objects of a pointer.
struct AAUnderlyingObjects : AbstractAttribute {
  AAUnderlyingObjects(const IRPosition &IRP) : AbstractAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Only positions with pointer (or pointer-vector) type are meaningful.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// Create an abstract attribute view for the position \p IRP.
  static AAUnderlyingObjects &createForPosition(const IRPosition &IRP,
                                                Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAUnderlyingObjects"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAUnderlyingObjects.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;

  /// Check \p Pred on all underlying objects in \p Scope collected so far.
  ///
  /// This method will evaluate \p Pred on all underlying objects in \p Scope
  /// collected so far and return true if \p Pred holds on all of them.
  virtual bool
  forallUnderlyingObjects(function_ref<bool(Value &)> Pred,
                          AA::ValueScope Scope = AA::Interprocedural) const = 0;
};
6016 
/// An abstract interface for address space information.
struct AAAddressSpace : public StateWrapper<BooleanState, AbstractAttribute> {
  AAAddressSpace(const IRPosition &IRP, Attributor &A)
      : StateWrapper<BooleanState, AbstractAttribute>(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  /// Only positions with pointer (or pointer-vector) type are meaningful.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// Return the address space of the associated value. \p NoAddressSpace is
  /// returned if the associated value is dead. This function is not supposed
  /// to be called if the AA is invalid.
  virtual int32_t getAddressSpace() const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAAddressSpace &createForPosition(const IRPosition &IRP,
                                           Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAAddressSpace"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAAddressSpace
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  // No address space which indicates the associated value is dead.
  static const int32_t NoAddressSpace = -1;

  /// Unique ID (due to the unique address)
  static const char ID;
};
6059 
6060 raw_ostream &operator<<(raw_ostream &, const AAPointerInfo::Access &);
6061 
/// Run options, used by the pass manager.
enum AttributorRunOption {
  NONE = 0,             ///< Do not run the Attributor.
  MODULE = 1 << 0,      ///< Enable the module-level run.
  CGSCC = 1 << 1,       ///< Enable the CGSCC-level run.
  ALL = MODULE | CGSCC  ///< Enable both run variants.
};
6069 
6070 namespace AA {
6071 /// Helper to avoid creating an AA for IR Attributes that might already be set.
6072 template <Attribute::AttrKind AK, typename AAType = AbstractAttribute>
6073 bool hasAssumedIRAttr(Attributor &A, const AbstractAttribute *QueryingAA,
6074                       const IRPosition &IRP, DepClassTy DepClass, bool &IsKnown,
6075                       bool IgnoreSubsumingPositions = false,
6076                       const AAType **AAPtr = nullptr) {
6077   IsKnown = false;
6078   switch (AK) {
6079 #define CASE(ATTRNAME, AANAME, ...)                                            \
6080   case Attribute::ATTRNAME: {                                                  \
6081     if (AANAME::isImpliedByIR(A, IRP, AK, IgnoreSubsumingPositions))           \
6082       return IsKnown = true;                                                   \
6083     if (!QueryingAA)                                                           \
6084       return false;                                                            \
6085     const auto *AA = A.getAAFor<AANAME>(*QueryingAA, IRP, DepClass);           \
6086     if (AAPtr)                                                                 \
6087       *AAPtr = reinterpret_cast<const AAType *>(AA);                           \
6088     if (!AA || !AA->isAssumed(__VA_ARGS__))                                    \
6089       return false;                                                            \
6090     IsKnown = AA->isKnown(__VA_ARGS__);                                        \
6091     return true;                                                               \
6092   }
6093     CASE(NoUnwind, AANoUnwind, );
6094     CASE(WillReturn, AAWillReturn, );
6095     CASE(NoFree, AANoFree, );
6096     CASE(NoCapture, AANoCapture, );
6097     CASE(NoRecurse, AANoRecurse, );
6098     CASE(NoReturn, AANoReturn, );
6099     CASE(NoSync, AANoSync, );
6100     CASE(NoAlias, AANoAlias, );
6101     CASE(NonNull, AANonNull, );
6102     CASE(MustProgress, AAMustProgress, );
6103     CASE(NoUndef, AANoUndef, );
6104     CASE(ReadNone, AAMemoryBehavior, AAMemoryBehavior::NO_ACCESSES);
6105     CASE(ReadOnly, AAMemoryBehavior, AAMemoryBehavior::NO_WRITES);
6106     CASE(WriteOnly, AAMemoryBehavior, AAMemoryBehavior::NO_READS);
6107 #undef CASE
6108   default:
6109     llvm_unreachable("hasAssumedIRAttr not available for this attribute kind");
6110   };
6111 }
6112 } // namespace AA
6113 
6114 } // end namespace llvm
6115 
6116 #endif // LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
6117