1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMapInfo.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/SCCIterator.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetOperations.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/Analysis/AliasAnalysis.h"
29 #include "llvm/Analysis/AssumeBundleQueries.h"
30 #include "llvm/Analysis/AssumptionCache.h"
31 #include "llvm/Analysis/CaptureTracking.h"
32 #include "llvm/Analysis/CycleAnalysis.h"
33 #include "llvm/Analysis/InstructionSimplify.h"
34 #include "llvm/Analysis/LazyValueInfo.h"
35 #include "llvm/Analysis/MemoryBuiltins.h"
36 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
37 #include "llvm/Analysis/ScalarEvolution.h"
38 #include "llvm/Analysis/TargetTransformInfo.h"
39 #include "llvm/Analysis/ValueTracking.h"
40 #include "llvm/IR/Argument.h"
41 #include "llvm/IR/Assumptions.h"
42 #include "llvm/IR/Attributes.h"
43 #include "llvm/IR/BasicBlock.h"
44 #include "llvm/IR/Constant.h"
45 #include "llvm/IR/Constants.h"
46 #include "llvm/IR/DataLayout.h"
47 #include "llvm/IR/DerivedTypes.h"
48 #include "llvm/IR/GlobalValue.h"
49 #include "llvm/IR/IRBuilder.h"
50 #include "llvm/IR/InlineAsm.h"
51 #include "llvm/IR/InstrTypes.h"
52 #include "llvm/IR/Instruction.h"
53 #include "llvm/IR/Instructions.h"
54 #include "llvm/IR/IntrinsicInst.h"
55 #include "llvm/IR/IntrinsicsAMDGPU.h"
56 #include "llvm/IR/IntrinsicsNVPTX.h"
57 #include "llvm/IR/LLVMContext.h"
58 #include "llvm/IR/MDBuilder.h"
59 #include "llvm/IR/NoFolder.h"
60 #include "llvm/IR/Value.h"
61 #include "llvm/IR/ValueHandle.h"
62 #include "llvm/Support/Alignment.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/ErrorHandling.h"
66 #include "llvm/Support/GraphWriter.h"
67 #include "llvm/Support/MathExtras.h"
68 #include "llvm/Support/TypeSize.h"
69 #include "llvm/Support/raw_ostream.h"
70 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
71 #include "llvm/Transforms/Utils/CallPromotionUtils.h"
72 #include "llvm/Transforms/Utils/Local.h"
73 #include "llvm/Transforms/Utils/ValueMapper.h"
74 #include <cassert>
75 #include <numeric>
76 #include <optional>
77 #include <string>
78 
79 using namespace llvm;
80 
81 #define DEBUG_TYPE "attributor"
82 
83 static cl::opt<bool> ManifestInternal(
84     "attributor-manifest-internal", cl::Hidden,
85     cl::desc("Manifest Attributor internal string attributes."),
86     cl::init(false));
87 
88 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
89                                        cl::Hidden);
90 
91 template <>
92 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
93 
94 template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;
95 
96 static cl::opt<unsigned, true> MaxPotentialValues(
97     "attributor-max-potential-values", cl::Hidden,
98     cl::desc("Maximum number of potential values to be "
99              "tracked for each position."),
100     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
101     cl::init(7));
102 
103 static cl::opt<int> MaxPotentialValuesIterations(
104     "attributor-max-potential-values-iterations", cl::Hidden,
105     cl::desc(
106         "Maximum number of iterations we keep dismantling potential values."),
107     cl::init(64));
108 
109 STATISTIC(NumAAs, "Number of abstract attributes created");
110 
111 // Some helper macros to deal with statistics tracking.
112 //
113 // Usage:
114 // For simple IR attribute tracking overload trackStatistics in the abstract
115 // attribute and choose the right STATS_DECLTRACK_********* macro,
116 // e.g.,:
117 //  void trackStatistics() const override {
118 //    STATS_DECLTRACK_ARG_ATTR(returned)
119 //  }
120 // If there is a single "increment" side one can use the macro
121 // STATS_DECLTRACK with a custom message. If there are multiple increment
122 // sides, STATS_DECL and STATS_TRACK can also be used separately.
123 //
124 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
125   ("Number of " #TYPE " marked '" #NAME "'")
126 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
127 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
128 #define STATS_DECL(NAME, TYPE, MSG)                                            \
129   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
130 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
131 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
132   {                                                                            \
133     STATS_DECL(NAME, TYPE, MSG)                                                \
134     STATS_TRACK(NAME, TYPE)                                                    \
135   }
136 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
137   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
138 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
139   STATS_DECLTRACK(NAME, CSArguments,                                           \
140                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
141 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
142   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
143 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
144   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
145 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
146   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
147                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
148 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
149   STATS_DECLTRACK(NAME, CSReturn,                                              \
150                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
151 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
152   STATS_DECLTRACK(NAME, Floating,                                              \
153                   ("Number of floating values known to be '" #NAME "'"))
154 
155 // Specialization of the operator<< for abstract attributes subclasses. This
156 // disambiguates situations where multiple operators are applicable.
157 namespace llvm {
158 #define PIPE_OPERATOR(CLASS)                                                   \
159   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
160     return OS << static_cast<const AbstractAttribute &>(AA);                   \
161   }
162 
163 PIPE_OPERATOR(AAIsDead)
164 PIPE_OPERATOR(AANoUnwind)
165 PIPE_OPERATOR(AANoSync)
166 PIPE_OPERATOR(AANoRecurse)
167 PIPE_OPERATOR(AANonConvergent)
168 PIPE_OPERATOR(AAWillReturn)
169 PIPE_OPERATOR(AANoReturn)
170 PIPE_OPERATOR(AANonNull)
171 PIPE_OPERATOR(AAMustProgress)
172 PIPE_OPERATOR(AANoAlias)
173 PIPE_OPERATOR(AADereferenceable)
174 PIPE_OPERATOR(AAAlign)
175 PIPE_OPERATOR(AAInstanceInfo)
176 PIPE_OPERATOR(AANoCapture)
177 PIPE_OPERATOR(AAValueSimplify)
178 PIPE_OPERATOR(AANoFree)
179 PIPE_OPERATOR(AAHeapToStack)
180 PIPE_OPERATOR(AAIntraFnReachability)
181 PIPE_OPERATOR(AAMemoryBehavior)
182 PIPE_OPERATOR(AAMemoryLocation)
183 PIPE_OPERATOR(AAValueConstantRange)
184 PIPE_OPERATOR(AAPrivatizablePtr)
185 PIPE_OPERATOR(AAUndefinedBehavior)
186 PIPE_OPERATOR(AAPotentialConstantValues)
187 PIPE_OPERATOR(AAPotentialValues)
188 PIPE_OPERATOR(AANoUndef)
189 PIPE_OPERATOR(AANoFPClass)
190 PIPE_OPERATOR(AACallEdges)
191 PIPE_OPERATOR(AAInterFnReachability)
192 PIPE_OPERATOR(AAPointerInfo)
193 PIPE_OPERATOR(AAAssumptionInfo)
194 PIPE_OPERATOR(AAUnderlyingObjects)
195 PIPE_OPERATOR(AAAddressSpace)
196 PIPE_OPERATOR(AAAllocationInfo)
197 PIPE_OPERATOR(AAIndirectCallInfo)
198 PIPE_OPERATOR(AAGlobalValueInfo)
199 PIPE_OPERATOR(AADenormalFPMath)
200 
201 #undef PIPE_OPERATOR
202 
203 template <>
204 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
205                                                      const DerefState &R) {
206   ChangeStatus CS0 =
207       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
208   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
209   return CS0 | CS1;
210 }
211 
212 } // namespace llvm
213 
214 static bool mayBeInCycle(const CycleInfo *CI, const Instruction *I,
215                          bool HeaderOnly, Cycle **CPtr = nullptr) {
216   if (!CI)
217     return true;
218   auto *BB = I->getParent();
219   auto *C = CI->getCycle(BB);
220   if (!C)
221     return false;
222   if (CPtr)
223     *CPtr = C;
224   return !HeaderOnly || BB == C->getHeader();
225 }
226 
227 /// Checks if a type could have padding bytes.
228 static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
229   // There is no size information, so be conservative.
230   if (!Ty->isSized())
231     return false;
232 
233   // If the alloc size is not equal to the storage size, then there are padding
234   // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
235   if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
236     return false;
237 
238   // FIXME: This isn't the right way to check for padding in vectors with
239   // non-byte-size elements.
240   if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
241     return isDenselyPacked(SeqTy->getElementType(), DL);
242 
243   // For array types, check for padding within members.
244   if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
245     return isDenselyPacked(SeqTy->getElementType(), DL);
246 
247   if (!isa<StructType>(Ty))
248     return true;
249 
250   // Check for padding within and between elements of a struct.
251   StructType *StructTy = cast<StructType>(Ty);
252   const StructLayout *Layout = DL.getStructLayout(StructTy);
253   uint64_t StartPos = 0;
254   for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
255     Type *ElTy = StructTy->getElementType(I);
256     if (!isDenselyPacked(ElTy, DL))
257       return false;
258     if (StartPos != Layout->getElementOffsetInBits(I))
259       return false;
260     StartPos += DL.getTypeAllocSizeInBits(ElTy);
261   }
262 
263   return true;
264 }
265 
266 /// Get pointer operand of memory accessing instruction. If \p I is
267 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
268 /// is set to false and the instruction is volatile, return nullptr.
269 static const Value *getPointerOperand(const Instruction *I,
270                                       bool AllowVolatile) {
271   if (!AllowVolatile && I->isVolatile())
272     return nullptr;
273 
274   if (auto *LI = dyn_cast<LoadInst>(I)) {
275     return LI->getPointerOperand();
276   }
277 
278   if (auto *SI = dyn_cast<StoreInst>(I)) {
279     return SI->getPointerOperand();
280   }
281 
282   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
283     return CXI->getPointerOperand();
284   }
285 
286   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
287     return RMWI->getPointerOperand();
288   }
289 
290   return nullptr;
291 }
292 
293 /// Helper function to create a pointer based on \p Ptr, and advanced by \p
294 /// Offset bytes.
295 static Value *constructPointer(Value *Ptr, int64_t Offset,
296                                IRBuilder<NoFolder> &IRB) {
297   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
298                     << "-bytes\n");
299 
300   if (Offset)
301     Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt64(Offset),
302                         Ptr->getName() + ".b" + Twine(Offset));
303   return Ptr;
304 }
305 
306 static const Value *
307 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
308                           const Value *Val, const DataLayout &DL, APInt &Offset,
309                           bool GetMinOffset, bool AllowNonInbounds,
310                           bool UseAssumed = false) {
311 
312   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
313     const IRPosition &Pos = IRPosition::value(V);
314     // Only track dependence if we are going to use the assumed info.
315     const AAValueConstantRange *ValueConstantRangeAA =
316         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
317                                          UseAssumed ? DepClassTy::OPTIONAL
318                                                     : DepClassTy::NONE);
319     if (!ValueConstantRangeAA)
320       return false;
321     ConstantRange Range = UseAssumed ? ValueConstantRangeAA->getAssumed()
322                                      : ValueConstantRangeAA->getKnown();
323     if (Range.isFullSet())
324       return false;
325 
326     // We can only use the lower part of the range because the upper part can
327     // be higher than what the value can really be.
328     if (GetMinOffset)
329       ROffset = Range.getSignedMin();
330     else
331       ROffset = Range.getSignedMax();
332     return true;
333   };
334 
335   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
336                                                 /* AllowInvariant */ true,
337                                                 AttributorAnalysis);
338 }
339 
340 static const Value *
341 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
342                         const Value *Ptr, int64_t &BytesOffset,
343                         const DataLayout &DL, bool AllowNonInbounds = false) {
344   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
345   const Value *Base =
346       stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
347                                 /* GetMinOffset */ true, AllowNonInbounds);
348 
349   BytesOffset = OffsetAPInt.getSExtValue();
350   return Base;
351 }
352 
353 /// Clamp the information known for all returned values of a function
354 /// (identified by \p QueryingAA) into \p S.
355 template <typename AAType, typename StateType = typename AAType::StateType,
356           Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind,
357           bool RecurseForSelectAndPHI = true>
358 static void clampReturnedValueStates(
359     Attributor &A, const AAType &QueryingAA, StateType &S,
360     const IRPosition::CallBaseContext *CBContext = nullptr) {
361   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
362                     << QueryingAA << " into " << S << "\n");
363 
364   assert((QueryingAA.getIRPosition().getPositionKind() ==
365               IRPosition::IRP_RETURNED ||
366           QueryingAA.getIRPosition().getPositionKind() ==
367               IRPosition::IRP_CALL_SITE_RETURNED) &&
368          "Can only clamp returned value states for a function returned or call "
369          "site returned position!");
370 
371   // Use an optional state as there might not be any return values and we want
372   // to join (IntegerState::operator&) the state of all there are.
373   std::optional<StateType> T;
374 
375   // Callback for each possibly returned value.
376   auto CheckReturnValue = [&](Value &RV) -> bool {
377     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
378     // If possible, use the hasAssumedIRAttr interface.
379     if (Attribute::isEnumAttrKind(IRAttributeKind)) {
380       bool IsKnown;
381       return AA::hasAssumedIRAttr<IRAttributeKind>(
382           A, &QueryingAA, RVPos, DepClassTy::REQUIRED, IsKnown);
383     }
384 
385     const AAType *AA =
386         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
387     if (!AA)
388       return false;
389     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV
390                       << " AA: " << AA->getAsStr(&A) << " @ " << RVPos << "\n");
391     const StateType &AAS = AA->getState();
392     if (!T)
393       T = StateType::getBestState(AAS);
394     *T &= AAS;
395     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
396                       << "\n");
397     return T->isValidState();
398   };
399 
400   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA,
401                                    AA::ValueScope::Intraprocedural,
402                                    RecurseForSelectAndPHI))
403     S.indicatePessimisticFixpoint();
404   else if (T)
405     S ^= *T;
406 }
407 
408 namespace {
409 /// Helper class for generic deduction: return value -> returned position.
410 template <typename AAType, typename BaseType,
411           typename StateType = typename BaseType::StateType,
412           bool PropagateCallBaseContext = false,
413           Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind,
414           bool RecurseForSelectAndPHI = true>
415 struct AAReturnedFromReturnedValues : public BaseType {
416   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
417       : BaseType(IRP, A) {}
418 
419   /// See AbstractAttribute::updateImpl(...).
420   ChangeStatus updateImpl(Attributor &A) override {
421     StateType S(StateType::getBestState(this->getState()));
422     clampReturnedValueStates<AAType, StateType, IRAttributeKind, RecurseForSelectAndPHI>(
423         A, *this, S,
424         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
425     // TODO: If we know we visited all returned values, thus no are assumed
426     // dead, we can take the known information from the state T.
427     return clampStateAndIndicateChange<StateType>(this->getState(), S);
428   }
429 };
430 
431 /// Clamp the information known at all call sites for a given argument
432 /// (identified by \p QueryingAA) into \p S.
433 template <typename AAType, typename StateType = typename AAType::StateType,
434           Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
435 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
436                                         StateType &S) {
437   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
438                     << QueryingAA << " into " << S << "\n");
439 
440   assert(QueryingAA.getIRPosition().getPositionKind() ==
441              IRPosition::IRP_ARGUMENT &&
442          "Can only clamp call site argument states for an argument position!");
443 
444   // Use an optional state as there might not be any return values and we want
445   // to join (IntegerState::operator&) the state of all there are.
446   std::optional<StateType> T;
447 
448   // The argument number which is also the call site argument number.
449   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
450 
451   auto CallSiteCheck = [&](AbstractCallSite ACS) {
452     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
453     // Check if a coresponding argument was found or if it is on not associated
454     // (which can happen for callback calls).
455     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
456       return false;
457 
458     // If possible, use the hasAssumedIRAttr interface.
459     if (Attribute::isEnumAttrKind(IRAttributeKind)) {
460       bool IsKnown;
461       return AA::hasAssumedIRAttr<IRAttributeKind>(
462           A, &QueryingAA, ACSArgPos, DepClassTy::REQUIRED, IsKnown);
463     }
464 
465     const AAType *AA =
466         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
467     if (!AA)
468       return false;
469     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
470                       << " AA: " << AA->getAsStr(&A) << " @" << ACSArgPos
471                       << "\n");
472     const StateType &AAS = AA->getState();
473     if (!T)
474       T = StateType::getBestState(AAS);
475     *T &= AAS;
476     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
477                       << "\n");
478     return T->isValidState();
479   };
480 
481   bool UsedAssumedInformation = false;
482   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
483                               UsedAssumedInformation))
484     S.indicatePessimisticFixpoint();
485   else if (T)
486     S ^= *T;
487 }
488 
489 /// This function is the bridge between argument position and the call base
490 /// context.
491 template <typename AAType, typename BaseType,
492           typename StateType = typename AAType::StateType,
493           Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
494 bool getArgumentStateFromCallBaseContext(Attributor &A,
495                                          BaseType &QueryingAttribute,
496                                          IRPosition &Pos, StateType &State) {
497   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
498          "Expected an 'argument' position !");
499   const CallBase *CBContext = Pos.getCallBaseContext();
500   if (!CBContext)
501     return false;
502 
503   int ArgNo = Pos.getCallSiteArgNo();
504   assert(ArgNo >= 0 && "Invalid Arg No!");
505   const IRPosition CBArgPos = IRPosition::callsite_argument(*CBContext, ArgNo);
506 
507   // If possible, use the hasAssumedIRAttr interface.
508   if (Attribute::isEnumAttrKind(IRAttributeKind)) {
509     bool IsKnown;
510     return AA::hasAssumedIRAttr<IRAttributeKind>(
511         A, &QueryingAttribute, CBArgPos, DepClassTy::REQUIRED, IsKnown);
512   }
513 
514   const auto *AA =
515       A.getAAFor<AAType>(QueryingAttribute, CBArgPos, DepClassTy::REQUIRED);
516   if (!AA)
517     return false;
518   const StateType &CBArgumentState =
519       static_cast<const StateType &>(AA->getState());
520 
521   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
522                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
523                     << "\n");
524 
525   // NOTE: If we want to do call site grouping it should happen here.
526   State ^= CBArgumentState;
527   return true;
528 }
529 
530 /// Helper class for generic deduction: call site argument -> argument position.
531 template <typename AAType, typename BaseType,
532           typename StateType = typename AAType::StateType,
533           bool BridgeCallBaseContext = false,
534           Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
535 struct AAArgumentFromCallSiteArguments : public BaseType {
536   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
537       : BaseType(IRP, A) {}
538 
539   /// See AbstractAttribute::updateImpl(...).
540   ChangeStatus updateImpl(Attributor &A) override {
541     StateType S = StateType::getBestState(this->getState());
542 
543     if (BridgeCallBaseContext) {
544       bool Success =
545           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType,
546                                               IRAttributeKind>(
547               A, *this, this->getIRPosition(), S);
548       if (Success)
549         return clampStateAndIndicateChange<StateType>(this->getState(), S);
550     }
551     clampCallSiteArgumentStates<AAType, StateType, IRAttributeKind>(A, *this,
552                                                                     S);
553 
554     // TODO: If we know we visited all incoming values, thus no are assumed
555     // dead, we can take the known information from the state T.
556     return clampStateAndIndicateChange<StateType>(this->getState(), S);
557   }
558 };
559 
560 /// Helper class for generic replication: function returned -> cs returned.
561 template <typename AAType, typename BaseType,
562           typename StateType = typename BaseType::StateType,
563           bool IntroduceCallBaseContext = false,
564           Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
565 struct AACalleeToCallSite : public BaseType {
566   AACalleeToCallSite(const IRPosition &IRP, Attributor &A) : BaseType(IRP, A) {}
567 
568   /// See AbstractAttribute::updateImpl(...).
569   ChangeStatus updateImpl(Attributor &A) override {
570     auto IRPKind = this->getIRPosition().getPositionKind();
571     assert((IRPKind == IRPosition::IRP_CALL_SITE_RETURNED ||
572             IRPKind == IRPosition::IRP_CALL_SITE) &&
573            "Can only wrap function returned positions for call site "
574            "returned positions!");
575     auto &S = this->getState();
576 
577     CallBase &CB = cast<CallBase>(this->getAnchorValue());
578     if (IntroduceCallBaseContext)
579       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:" << CB
580                         << "\n");
581 
582     ChangeStatus Changed = ChangeStatus::UNCHANGED;
583     auto CalleePred = [&](ArrayRef<const Function *> Callees) {
584       for (const Function *Callee : Callees) {
585         IRPosition FnPos =
586             IRPKind == llvm::IRPosition::IRP_CALL_SITE_RETURNED
587                 ? IRPosition::returned(*Callee,
588                                        IntroduceCallBaseContext ? &CB : nullptr)
589                 : IRPosition::function(
590                       *Callee, IntroduceCallBaseContext ? &CB : nullptr);
591         // If possible, use the hasAssumedIRAttr interface.
592         if (Attribute::isEnumAttrKind(IRAttributeKind)) {
593           bool IsKnown;
594           if (!AA::hasAssumedIRAttr<IRAttributeKind>(
595                   A, this, FnPos, DepClassTy::REQUIRED, IsKnown))
596             return false;
597           continue;
598         }
599 
600         const AAType *AA =
601             A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
602         if (!AA)
603           return false;
604         Changed |= clampStateAndIndicateChange(S, AA->getState());
605         if (S.isAtFixpoint())
606           return S.isValidState();
607       }
608       return true;
609     };
610     if (!A.checkForAllCallees(CalleePred, *this, CB))
611       return S.indicatePessimisticFixpoint();
612     return Changed;
613   }
614 };
615 
616 /// Helper function to accumulate uses.
617 template <class AAType, typename StateType = typename AAType::StateType>
618 static void followUsesInContext(AAType &AA, Attributor &A,
619                                 MustBeExecutedContextExplorer &Explorer,
620                                 const Instruction *CtxI,
621                                 SetVector<const Use *> &Uses,
622                                 StateType &State) {
623   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
624   for (unsigned u = 0; u < Uses.size(); ++u) {
625     const Use *U = Uses[u];
626     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
627       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
628       if (Found && AA.followUseInMBEC(A, U, UserI, State))
629         for (const Use &Us : UserI->uses())
630           Uses.insert(&Us);
631     }
632   }
633 }
634 
635 /// Use the must-be-executed-context around \p I to add information into \p S.
636 /// The AAType class is required to have `followUseInMBEC` method with the
637 /// following signature and behaviour:
638 ///
639 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
640 /// U - Underlying use.
641 /// I - The user of the \p U.
642 /// Returns true if the value should be tracked transitively.
643 ///
644 template <class AAType, typename StateType = typename AAType::StateType>
645 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
646                              Instruction &CtxI) {
647   MustBeExecutedContextExplorer *Explorer =
648       A.getInfoCache().getMustBeExecutedContextExplorer();
649   if (!Explorer)
650     return;
651 
652   // Container for (transitive) uses of the associated value.
653   SetVector<const Use *> Uses;
654   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
655     Uses.insert(&U);
656 
657   followUsesInContext<AAType>(AA, A, *Explorer, &CtxI, Uses, S);
658 
659   if (S.isAtFixpoint())
660     return;
661 
662   SmallVector<const BranchInst *, 4> BrInsts;
663   auto Pred = [&](const Instruction *I) {
664     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
665       if (Br->isConditional())
666         BrInsts.push_back(Br);
667     return true;
668   };
669 
670   // Here, accumulate conditional branch instructions in the context. We
671   // explore the child paths and collect the known states. The disjunction of
672   // those states can be merged to its own state. Let ParentState_i be a state
673   // to indicate the known information for an i-th branch instruction in the
674   // context. ChildStates are created for its successors respectively.
675   //
676   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
677   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
678   //      ...
679   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
680   //
681   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
682   //
683   // FIXME: Currently, recursive branches are not handled. For example, we
684   // can't deduce that ptr must be dereferenced in below function.
685   //
686   // void f(int a, int c, int *ptr) {
687   //    if(a)
688   //      if (b) {
689   //        *ptr = 0;
690   //      } else {
691   //        *ptr = 1;
692   //      }
693   //    else {
694   //      if (b) {
695   //        *ptr = 0;
696   //      } else {
697   //        *ptr = 1;
698   //      }
699   //    }
700   // }
701 
702   Explorer->checkForAllContext(&CtxI, Pred);
703   for (const BranchInst *Br : BrInsts) {
704     StateType ParentState;
705 
706     // The known state of the parent state is a conjunction of children's
707     // known states so it is initialized with a best state.
708     ParentState.indicateOptimisticFixpoint();
709 
710     for (const BasicBlock *BB : Br->successors()) {
711       StateType ChildState;
712 
713       size_t BeforeSize = Uses.size();
714       followUsesInContext(AA, A, *Explorer, &BB->front(), Uses, ChildState);
715 
716       // Erase uses which only appear in the child.
717       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
718         It = Uses.erase(It);
719 
720       ParentState &= ChildState;
721     }
722 
723     // Use only known state.
724     S += ParentState;
725   }
726 }
727 } // namespace
728 
729 /// ------------------------ PointerInfo ---------------------------------------
730 
731 namespace llvm {
732 namespace AA {
733 namespace PointerInfo {
734 
735 struct State;
736 
737 } // namespace PointerInfo
738 } // namespace AA
739 
740 /// Helper for AA::PointerInfo::Access DenseMap/Set usage.
741 template <>
742 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
743   using Access = AAPointerInfo::Access;
744   static inline Access getEmptyKey();
745   static inline Access getTombstoneKey();
746   static unsigned getHashValue(const Access &A);
747   static bool isEqual(const Access &LHS, const Access &RHS);
748 };
749 
750 /// Helper that allows RangeTy as a key in a DenseMap.
751 template <> struct DenseMapInfo<AA::RangeTy> {
752   static inline AA::RangeTy getEmptyKey() {
753     auto EmptyKey = DenseMapInfo<int64_t>::getEmptyKey();
754     return AA::RangeTy{EmptyKey, EmptyKey};
755   }
756 
757   static inline AA::RangeTy getTombstoneKey() {
758     auto TombstoneKey = DenseMapInfo<int64_t>::getTombstoneKey();
759     return AA::RangeTy{TombstoneKey, TombstoneKey};
760   }
761 
762   static unsigned getHashValue(const AA::RangeTy &Range) {
763     return detail::combineHashValue(
764         DenseMapInfo<int64_t>::getHashValue(Range.Offset),
765         DenseMapInfo<int64_t>::getHashValue(Range.Size));
766   }
767 
768   static bool isEqual(const AA::RangeTy &A, const AA::RangeTy B) {
769     return A == B;
770   }
771 };
772 
773 /// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everythign
774 /// but the instruction
775 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
776   using Base = DenseMapInfo<Instruction *>;
777   using Access = AAPointerInfo::Access;
778   static inline Access getEmptyKey();
779   static inline Access getTombstoneKey();
780   static unsigned getHashValue(const Access &A);
781   static bool isEqual(const Access &LHS, const Access &RHS);
782 };
783 
784 } // namespace llvm
785 
786 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
787 struct AA::PointerInfo::State : public AbstractState {
788   /// Return the best possible representable state.
789   static State getBestState(const State &SIS) { return State(); }
790 
791   /// Return the worst possible representable state.
792   static State getWorstState(const State &SIS) {
793     State R;
794     R.indicatePessimisticFixpoint();
795     return R;
796   }
797 
798   State() = default;
799   State(State &&SIS) = default;
800 
801   const State &getAssumed() const { return *this; }
802 
803   /// See AbstractState::isValidState().
804   bool isValidState() const override { return BS.isValidState(); }
805 
806   /// See AbstractState::isAtFixpoint().
807   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
808 
809   /// See AbstractState::indicateOptimisticFixpoint().
810   ChangeStatus indicateOptimisticFixpoint() override {
811     BS.indicateOptimisticFixpoint();
812     return ChangeStatus::UNCHANGED;
813   }
814 
815   /// See AbstractState::indicatePessimisticFixpoint().
816   ChangeStatus indicatePessimisticFixpoint() override {
817     BS.indicatePessimisticFixpoint();
818     return ChangeStatus::CHANGED;
819   }
820 
821   State &operator=(const State &R) {
822     if (this == &R)
823       return *this;
824     BS = R.BS;
825     AccessList = R.AccessList;
826     OffsetBins = R.OffsetBins;
827     RemoteIMap = R.RemoteIMap;
828     return *this;
829   }
830 
831   State &operator=(State &&R) {
832     if (this == &R)
833       return *this;
834     std::swap(BS, R.BS);
835     std::swap(AccessList, R.AccessList);
836     std::swap(OffsetBins, R.OffsetBins);
837     std::swap(RemoteIMap, R.RemoteIMap);
838     return *this;
839   }
840 
841   /// Add a new Access to the state at offset \p Offset and with size \p Size.
842   /// The access is associated with \p I, writes \p Content (if anything), and
843   /// is of kind \p Kind. If an Access already exists for the same \p I and same
844   /// \p RemoteI, the two are combined, potentially losing information about
845   /// offset and size. The resulting access must now be moved from its original
846   /// OffsetBin to the bin for its new offset.
847   ///
848   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
849   ChangeStatus addAccess(Attributor &A, const AAPointerInfo::RangeList &Ranges,
850                          Instruction &I, std::optional<Value *> Content,
851                          AAPointerInfo::AccessKind Kind, Type *Ty,
852                          Instruction *RemoteI = nullptr);
853 
854   AAPointerInfo::const_bin_iterator begin() const { return OffsetBins.begin(); }
855   AAPointerInfo::const_bin_iterator end() const { return OffsetBins.end(); }
856   int64_t numOffsetBins() const { return OffsetBins.size(); }
857 
858   const AAPointerInfo::Access &getAccess(unsigned Index) const {
859     return AccessList[Index];
860   }
861 
862 protected:
863   // Every memory instruction results in an Access object. We maintain a list of
864   // all Access objects that we own, along with the following maps:
865   //
866   // - OffsetBins: RangeTy -> { Access }
867   // - RemoteIMap: RemoteI x LocalI -> Access
868   //
869   // A RemoteI is any instruction that accesses memory. RemoteI is different
870   // from LocalI if and only if LocalI is a call; then RemoteI is some
871   // instruction in the callgraph starting from LocalI. Multiple paths in the
872   // callgraph from LocalI to RemoteI may produce multiple accesses, but these
873   // are all combined into a single Access object. This may result in loss of
874   // information in RangeTy in the Access object.
875   SmallVector<AAPointerInfo::Access> AccessList;
876   AAPointerInfo::OffsetBinsTy OffsetBins;
877   DenseMap<const Instruction *, SmallVector<unsigned>> RemoteIMap;
878 
879   /// See AAPointerInfo::forallInterferingAccesses.
880   bool forallInterferingAccesses(
881       AA::RangeTy Range,
882       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
883     if (!isValidState())
884       return false;
885 
886     for (const auto &It : OffsetBins) {
887       AA::RangeTy ItRange = It.getFirst();
888       if (!Range.mayOverlap(ItRange))
889         continue;
890       bool IsExact = Range == ItRange && !Range.offsetOrSizeAreUnknown();
891       for (auto Index : It.getSecond()) {
892         auto &Access = AccessList[Index];
893         if (!CB(Access, IsExact))
894           return false;
895       }
896     }
897     return true;
898   }
899 
900   /// See AAPointerInfo::forallInterferingAccesses.
901   bool forallInterferingAccesses(
902       Instruction &I,
903       function_ref<bool(const AAPointerInfo::Access &, bool)> CB,
904       AA::RangeTy &Range) const {
905     if (!isValidState())
906       return false;
907 
908     auto LocalList = RemoteIMap.find(&I);
909     if (LocalList == RemoteIMap.end()) {
910       return true;
911     }
912 
913     for (unsigned Index : LocalList->getSecond()) {
914       for (auto &R : AccessList[Index]) {
915         Range &= R;
916         if (Range.offsetAndSizeAreUnknown())
917           break;
918       }
919     }
920     return forallInterferingAccesses(Range, CB);
921   }
922 
923 private:
924   /// State to track fixpoint and validity.
925   BooleanState BS;
926 };
927 
928 ChangeStatus AA::PointerInfo::State::addAccess(
929     Attributor &A, const AAPointerInfo::RangeList &Ranges, Instruction &I,
930     std::optional<Value *> Content, AAPointerInfo::AccessKind Kind, Type *Ty,
931     Instruction *RemoteI) {
932   RemoteI = RemoteI ? RemoteI : &I;
933 
934   // Check if we have an access for this instruction, if not, simply add it.
935   auto &LocalList = RemoteIMap[RemoteI];
936   bool AccExists = false;
937   unsigned AccIndex = AccessList.size();
938   for (auto Index : LocalList) {
939     auto &A = AccessList[Index];
940     if (A.getLocalInst() == &I) {
941       AccExists = true;
942       AccIndex = Index;
943       break;
944     }
945   }
946 
947   auto AddToBins = [&](const AAPointerInfo::RangeList &ToAdd) {
948     LLVM_DEBUG(if (ToAdd.size()) dbgs()
949                    << "[AAPointerInfo] Inserting access in new offset bins\n";);
950 
951     for (auto Key : ToAdd) {
952       LLVM_DEBUG(dbgs() << "    key " << Key << "\n");
953       OffsetBins[Key].insert(AccIndex);
954     }
955   };
956 
957   if (!AccExists) {
958     AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty);
959     assert((AccessList.size() == AccIndex + 1) &&
960            "New Access should have been at AccIndex");
961     LocalList.push_back(AccIndex);
962     AddToBins(AccessList[AccIndex].getRanges());
963     return ChangeStatus::CHANGED;
964   }
965 
966   // Combine the new Access with the existing Access, and then update the
967   // mapping in the offset bins.
968   AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty);
969   auto &Current = AccessList[AccIndex];
970   auto Before = Current;
971   Current &= Acc;
972   if (Current == Before)
973     return ChangeStatus::UNCHANGED;
974 
975   auto &ExistingRanges = Before.getRanges();
976   auto &NewRanges = Current.getRanges();
977 
978   // Ranges that are in the old access but not the new access need to be removed
979   // from the offset bins.
980   AAPointerInfo::RangeList ToRemove;
981   AAPointerInfo::RangeList::set_difference(ExistingRanges, NewRanges, ToRemove);
982   LLVM_DEBUG(if (ToRemove.size()) dbgs()
983                  << "[AAPointerInfo] Removing access from old offset bins\n";);
984 
985   for (auto Key : ToRemove) {
986     LLVM_DEBUG(dbgs() << "    key " << Key << "\n");
987     assert(OffsetBins.count(Key) && "Existing Access must be in some bin.");
988     auto &Bin = OffsetBins[Key];
989     assert(Bin.count(AccIndex) &&
990            "Expected bin to actually contain the Access.");
991     Bin.erase(AccIndex);
992   }
993 
994   // Ranges that are in the new access but not the old access need to be added
995   // to the offset bins.
996   AAPointerInfo::RangeList ToAdd;
997   AAPointerInfo::RangeList::set_difference(NewRanges, ExistingRanges, ToAdd);
998   AddToBins(ToAdd);
999   return ChangeStatus::CHANGED;
1000 }
1001 
1002 namespace {
1003 
1004 /// A helper containing a list of offsets computed for a Use. Ideally this
1005 /// list should be strictly ascending, but we ensure that only when we
1006 /// actually translate the list of offsets to a RangeList.
1007 struct OffsetInfo {
1008   using VecTy = SmallVector<int64_t>;
1009   using const_iterator = VecTy::const_iterator;
1010   VecTy Offsets;
1011 
1012   const_iterator begin() const { return Offsets.begin(); }
1013   const_iterator end() const { return Offsets.end(); }
1014 
1015   bool operator==(const OffsetInfo &RHS) const {
1016     return Offsets == RHS.Offsets;
1017   }
1018 
1019   bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
1020 
1021   void insert(int64_t Offset) { Offsets.push_back(Offset); }
1022   bool isUnassigned() const { return Offsets.size() == 0; }
1023 
1024   bool isUnknown() const {
1025     if (isUnassigned())
1026       return false;
1027     if (Offsets.size() == 1)
1028       return Offsets.front() == AA::RangeTy::Unknown;
1029     return false;
1030   }
1031 
1032   void setUnknown() {
1033     Offsets.clear();
1034     Offsets.push_back(AA::RangeTy::Unknown);
1035   }
1036 
1037   void addToAll(int64_t Inc) {
1038     for (auto &Offset : Offsets) {
1039       Offset += Inc;
1040     }
1041   }
1042 
1043   /// Copy offsets from \p R into the current list.
1044   ///
1045   /// Ideally all lists should be strictly ascending, but we defer that to the
1046   /// actual use of the list. So we just blindly append here.
1047   void merge(const OffsetInfo &R) { Offsets.append(R.Offsets); }
1048 };
1049 
1050 #ifndef NDEBUG
1051 static raw_ostream &operator<<(raw_ostream &OS, const OffsetInfo &OI) {
1052   ListSeparator LS;
1053   OS << "[";
1054   for (auto Offset : OI) {
1055     OS << LS << Offset;
1056   }
1057   OS << "]";
1058   return OS;
1059 }
1060 #endif // NDEBUG
1061 
1062 struct AAPointerInfoImpl
1063     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1064   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1065   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1066 
1067   /// See AbstractAttribute::getAsStr().
1068   const std::string getAsStr(Attributor *A) const override {
1069     return std::string("PointerInfo ") +
1070            (isValidState() ? (std::string("#") +
1071                               std::to_string(OffsetBins.size()) + " bins")
1072                            : "<invalid>");
1073   }
1074 
1075   /// See AbstractAttribute::manifest(...).
1076   ChangeStatus manifest(Attributor &A) override {
1077     return AAPointerInfo::manifest(A);
1078   }
1079 
1080   virtual const_bin_iterator begin() const override { return State::begin(); }
1081   virtual const_bin_iterator end() const override { return State::end(); }
1082   virtual int64_t numOffsetBins() const override {
1083     return State::numOffsetBins();
1084   }
1085 
1086   bool forallInterferingAccesses(
1087       AA::RangeTy Range,
1088       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1089       const override {
1090     return State::forallInterferingAccesses(Range, CB);
1091   }
1092 
1093   bool forallInterferingAccesses(
1094       Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1095       bool FindInterferingWrites, bool FindInterferingReads,
1096       function_ref<bool(const Access &, bool)> UserCB, bool &HasBeenWrittenTo,
1097       AA::RangeTy &Range,
1098       function_ref<bool(const Access &)> SkipCB) const override {
1099     HasBeenWrittenTo = false;
1100 
1101     SmallPtrSet<const Access *, 8> DominatingWrites;
1102     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1103 
1104     Function &Scope = *I.getFunction();
1105     bool IsKnownNoSync;
1106     bool IsAssumedNoSync = AA::hasAssumedIRAttr<Attribute::NoSync>(
1107         A, &QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL,
1108         IsKnownNoSync);
1109     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1110         IRPosition::function(Scope), &QueryingAA, DepClassTy::NONE);
1111     bool AllInSameNoSyncFn = IsAssumedNoSync;
1112     bool InstIsExecutedByInitialThreadOnly =
1113         ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I);
1114 
1115     // If the function is not ending in aligned barriers, we need the stores to
1116     // be in aligned barriers. The load being in one is not sufficient since the
1117     // store might be executed by a thread that disappears after, causing the
1118     // aligned barrier guarding the load to unblock and the load to read a value
1119     // that has no CFG path to the load.
1120     bool InstIsExecutedInAlignedRegion =
1121         FindInterferingReads && ExecDomainAA &&
1122         ExecDomainAA->isExecutedInAlignedRegion(A, I);
1123 
1124     if (InstIsExecutedInAlignedRegion || InstIsExecutedByInitialThreadOnly)
1125       A.recordDependence(*ExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
1126 
1127     InformationCache &InfoCache = A.getInfoCache();
1128     bool IsThreadLocalObj =
1129         AA::isAssumedThreadLocalObject(A, getAssociatedValue(), *this);
1130 
1131     // Helper to determine if we need to consider threading, which we cannot
1132     // right now. However, if the function is (assumed) nosync or the thread
1133     // executing all instructions is the main thread only we can ignore
1134     // threading. Also, thread-local objects do not require threading reasoning.
1135     // Finally, we can ignore threading if either access is executed in an
1136     // aligned region.
1137     auto CanIgnoreThreadingForInst = [&](const Instruction &I) -> bool {
1138       if (IsThreadLocalObj || AllInSameNoSyncFn)
1139         return true;
1140       const auto *FnExecDomainAA =
1141           I.getFunction() == &Scope
1142               ? ExecDomainAA
1143               : A.lookupAAFor<AAExecutionDomain>(
1144                     IRPosition::function(*I.getFunction()), &QueryingAA,
1145                     DepClassTy::NONE);
1146       if (!FnExecDomainAA)
1147         return false;
1148       if (InstIsExecutedInAlignedRegion ||
1149           (FindInterferingWrites &&
1150            FnExecDomainAA->isExecutedInAlignedRegion(A, I))) {
1151         A.recordDependence(*FnExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
1152         return true;
1153       }
1154       if (InstIsExecutedByInitialThreadOnly &&
1155           FnExecDomainAA->isExecutedByInitialThreadOnly(I)) {
1156         A.recordDependence(*FnExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
1157         return true;
1158       }
1159       return false;
1160     };
1161 
1162     // Helper to determine if the access is executed by the same thread as the
1163     // given instruction, for now it is sufficient to avoid any potential
1164     // threading effects as we cannot deal with them anyway.
1165     auto CanIgnoreThreading = [&](const Access &Acc) -> bool {
1166       return CanIgnoreThreadingForInst(*Acc.getRemoteInst()) ||
1167              (Acc.getRemoteInst() != Acc.getLocalInst() &&
1168               CanIgnoreThreadingForInst(*Acc.getLocalInst()));
1169     };
1170 
1171     // TODO: Use inter-procedural reachability and dominance.
1172     bool IsKnownNoRecurse;
1173     AA::hasAssumedIRAttr<Attribute::NoRecurse>(
1174         A, this, IRPosition::function(Scope), DepClassTy::OPTIONAL,
1175         IsKnownNoRecurse);
1176 
1177     // TODO: Use reaching kernels from AAKernelInfo (or move it to
1178     // AAExecutionDomain) such that we allow scopes other than kernels as long
1179     // as the reaching kernels are disjoint.
1180     bool InstInKernel = Scope.hasFnAttribute("kernel");
1181     bool ObjHasKernelLifetime = false;
1182     const bool UseDominanceReasoning =
1183         FindInterferingWrites && IsKnownNoRecurse;
1184     const DominatorTree *DT =
1185         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(Scope);
1186 
1187     // Helper to check if a value has "kernel lifetime", that is it will not
1188     // outlive a GPU kernel. This is true for shared, constant, and local
1189     // globals on AMD and NVIDIA GPUs.
1190     auto HasKernelLifetime = [&](Value *V, Module &M) {
1191       if (!AA::isGPU(M))
1192         return false;
1193       switch (AA::GPUAddressSpace(V->getType()->getPointerAddressSpace())) {
1194       case AA::GPUAddressSpace::Shared:
1195       case AA::GPUAddressSpace::Constant:
1196       case AA::GPUAddressSpace::Local:
1197         return true;
1198       default:
1199         return false;
1200       };
1201     };
1202 
1203     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1204     // to determine if we should look at reachability from the callee. For
1205     // certain pointers we know the lifetime and we do not have to step into the
1206     // callee to determine reachability as the pointer would be dead in the
1207     // callee. See the conditional initialization below.
1208     std::function<bool(const Function &)> IsLiveInCalleeCB;
1209 
1210     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1211       // If the alloca containing function is not recursive the alloca
1212       // must be dead in the callee.
1213       const Function *AIFn = AI->getFunction();
1214       ObjHasKernelLifetime = AIFn->hasFnAttribute("kernel");
1215       bool IsKnownNoRecurse;
1216       if (AA::hasAssumedIRAttr<Attribute::NoRecurse>(
1217               A, this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL,
1218               IsKnownNoRecurse)) {
1219         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1220       }
1221     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1222       // If the global has kernel lifetime we can stop if we reach a kernel
1223       // as it is "dead" in the (unknown) callees.
1224       ObjHasKernelLifetime = HasKernelLifetime(GV, *GV->getParent());
1225       if (ObjHasKernelLifetime)
1226         IsLiveInCalleeCB = [](const Function &Fn) {
1227           return !Fn.hasFnAttribute("kernel");
1228         };
1229     }
1230 
1231     // Set of accesses/instructions that will overwrite the result and are
1232     // therefore blockers in the reachability traversal.
1233     AA::InstExclusionSetTy ExclusionSet;
1234 
1235     auto AccessCB = [&](const Access &Acc, bool Exact) {
1236       Function *AccScope = Acc.getRemoteInst()->getFunction();
1237       bool AccInSameScope = AccScope == &Scope;
1238 
1239       // If the object has kernel lifetime we can ignore accesses only reachable
1240       // by other kernels. For now we only skip accesses *in* other kernels.
1241       if (InstInKernel && ObjHasKernelLifetime && !AccInSameScope &&
1242           AccScope->hasFnAttribute("kernel"))
1243         return true;
1244 
1245       if (Exact && Acc.isMustAccess() && Acc.getRemoteInst() != &I) {
1246         if (Acc.isWrite() || (isa<LoadInst>(I) && Acc.isWriteOrAssumption()))
1247           ExclusionSet.insert(Acc.getRemoteInst());
1248       }
1249 
1250       if ((!FindInterferingWrites || !Acc.isWriteOrAssumption()) &&
1251           (!FindInterferingReads || !Acc.isRead()))
1252         return true;
1253 
1254       bool Dominates = FindInterferingWrites && DT && Exact &&
1255                        Acc.isMustAccess() && AccInSameScope &&
1256                        DT->dominates(Acc.getRemoteInst(), &I);
1257       if (Dominates)
1258         DominatingWrites.insert(&Acc);
1259 
1260       // Track if all interesting accesses are in the same `nosync` function as
1261       // the given instruction.
1262       AllInSameNoSyncFn &= Acc.getRemoteInst()->getFunction() == &Scope;
1263 
1264       InterferingAccesses.push_back({&Acc, Exact});
1265       return true;
1266     };
1267     if (!State::forallInterferingAccesses(I, AccessCB, Range))
1268       return false;
1269 
1270     HasBeenWrittenTo = !DominatingWrites.empty();
1271 
1272     // Dominating writes form a chain, find the least/lowest member.
1273     Instruction *LeastDominatingWriteInst = nullptr;
1274     for (const Access *Acc : DominatingWrites) {
1275       if (!LeastDominatingWriteInst) {
1276         LeastDominatingWriteInst = Acc->getRemoteInst();
1277       } else if (DT->dominates(LeastDominatingWriteInst,
1278                                Acc->getRemoteInst())) {
1279         LeastDominatingWriteInst = Acc->getRemoteInst();
1280       }
1281     }
1282 
1283     // Helper to determine if we can skip a specific write access.
1284     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1285       if (SkipCB && SkipCB(Acc))
1286         return true;
1287       if (!CanIgnoreThreading(Acc))
1288         return false;
1289 
1290       // Check read (RAW) dependences and write (WAR) dependences as necessary.
1291       // If we successfully excluded all effects we are interested in, the
1292       // access can be skipped.
1293       bool ReadChecked = !FindInterferingReads;
1294       bool WriteChecked = !FindInterferingWrites;
1295 
1296       // If the instruction cannot reach the access, the former does not
1297       // interfere with what the access reads.
1298       if (!ReadChecked) {
1299         if (!AA::isPotentiallyReachable(A, I, *Acc.getRemoteInst(), QueryingAA,
1300                                         &ExclusionSet, IsLiveInCalleeCB))
1301           ReadChecked = true;
1302       }
1303       // If the instruction cannot be reach from the access, the latter does not
1304       // interfere with what the instruction reads.
1305       if (!WriteChecked) {
1306         if (!AA::isPotentiallyReachable(A, *Acc.getRemoteInst(), I, QueryingAA,
1307                                         &ExclusionSet, IsLiveInCalleeCB))
1308           WriteChecked = true;
1309       }
1310 
1311       // If we still might be affected by the write of the access but there are
1312       // dominating writes in the function of the instruction
1313       // (HasBeenWrittenTo), we can try to reason that the access is overwritten
1314       // by them. This would have happend above if they are all in the same
1315       // function, so we only check the inter-procedural case. Effectively, we
1316       // want to show that there is no call after the dominting write that might
1317       // reach the access, and when it returns reach the instruction with the
1318       // updated value. To this end, we iterate all call sites, check if they
1319       // might reach the instruction without going through another access
1320       // (ExclusionSet) and at the same time might reach the access. However,
1321       // that is all part of AAInterFnReachability.
1322       if (!WriteChecked && HasBeenWrittenTo &&
1323           Acc.getRemoteInst()->getFunction() != &Scope) {
1324 
1325         const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
1326             QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1327 
1328         // Without going backwards in the call tree, can we reach the access
1329         // from the least dominating write. Do not allow to pass the instruction
1330         // itself either.
1331         bool Inserted = ExclusionSet.insert(&I).second;
1332 
1333         if (!FnReachabilityAA ||
1334             !FnReachabilityAA->instructionCanReach(
1335                 A, *LeastDominatingWriteInst,
1336                 *Acc.getRemoteInst()->getFunction(), &ExclusionSet))
1337           WriteChecked = true;
1338 
1339         if (Inserted)
1340           ExclusionSet.erase(&I);
1341       }
1342 
1343       if (ReadChecked && WriteChecked)
1344         return true;
1345 
1346       if (!DT || !UseDominanceReasoning)
1347         return false;
1348       if (!DominatingWrites.count(&Acc))
1349         return false;
1350       return LeastDominatingWriteInst != Acc.getRemoteInst();
1351     };
1352 
1353     // Run the user callback on all accesses we cannot skip and return if
1354     // that succeeded for all or not.
1355     for (auto &It : InterferingAccesses) {
1356       if ((!AllInSameNoSyncFn && !IsThreadLocalObj && !ExecDomainAA) ||
1357           !CanSkipAccess(*It.first, It.second)) {
1358         if (!UserCB(*It.first, It.second))
1359           return false;
1360       }
1361     }
1362     return true;
1363   }
1364 
1365   ChangeStatus translateAndAddStateFromCallee(Attributor &A,
1366                                               const AAPointerInfo &OtherAA,
1367                                               CallBase &CB) {
1368     using namespace AA::PointerInfo;
1369     if (!OtherAA.getState().isValidState() || !isValidState())
1370       return indicatePessimisticFixpoint();
1371 
1372     const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1373     bool IsByval = OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1374 
1375     // Combine the accesses bin by bin.
1376     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1377     const auto &State = OtherAAImpl.getState();
1378     for (const auto &It : State) {
1379       for (auto Index : It.getSecond()) {
1380         const auto &RAcc = State.getAccess(Index);
1381         if (IsByval && !RAcc.isRead())
1382           continue;
1383         bool UsedAssumedInformation = false;
1384         AccessKind AK = RAcc.getKind();
1385         auto Content = A.translateArgumentToCallSiteContent(
1386             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1387         AK = AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
1388         AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
1389 
1390         Changed |= addAccess(A, RAcc.getRanges(), CB, Content, AK,
1391                              RAcc.getType(), RAcc.getRemoteInst());
1392       }
1393     }
1394     return Changed;
1395   }
1396 
1397   ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1398                                     const OffsetInfo &Offsets, CallBase &CB) {
1399     using namespace AA::PointerInfo;
1400     if (!OtherAA.getState().isValidState() || !isValidState())
1401       return indicatePessimisticFixpoint();
1402 
1403     const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1404 
1405     // Combine the accesses bin by bin.
1406     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1407     const auto &State = OtherAAImpl.getState();
1408     for (const auto &It : State) {
1409       for (auto Index : It.getSecond()) {
1410         const auto &RAcc = State.getAccess(Index);
1411         for (auto Offset : Offsets) {
1412           auto NewRanges = Offset == AA::RangeTy::Unknown
1413                                ? AA::RangeTy::getUnknown()
1414                                : RAcc.getRanges();
1415           if (!NewRanges.isUnknown()) {
1416             NewRanges.addToAllOffsets(Offset);
1417           }
1418           Changed |=
1419               addAccess(A, NewRanges, CB, RAcc.getContent(), RAcc.getKind(),
1420                         RAcc.getType(), RAcc.getRemoteInst());
1421         }
1422       }
1423     }
1424     return Changed;
1425   }
1426 
1427   /// Statistic tracking for all AAPointerInfo implementations.
1428   /// See AbstractAttribute::trackStatistics().
1429   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1430 
1431   /// Dump the state into \p O.
1432   void dumpState(raw_ostream &O) {
1433     for (auto &It : OffsetBins) {
1434       O << "[" << It.first.Offset << "-" << It.first.Offset + It.first.Size
1435         << "] : " << It.getSecond().size() << "\n";
1436       for (auto AccIndex : It.getSecond()) {
1437         auto &Acc = AccessList[AccIndex];
1438         O << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
1439         if (Acc.getLocalInst() != Acc.getRemoteInst())
1440           O << "     -->                         " << *Acc.getRemoteInst()
1441             << "\n";
1442         if (!Acc.isWrittenValueYetUndetermined()) {
1443           if (isa_and_nonnull<Function>(Acc.getWrittenValue()))
1444             O << "       - c: func " << Acc.getWrittenValue()->getName()
1445               << "\n";
1446           else if (Acc.getWrittenValue())
1447             O << "       - c: " << *Acc.getWrittenValue() << "\n";
1448           else
1449             O << "       - c: <unknown>\n";
1450         }
1451       }
1452     }
1453   }
1454 };
1455 
1456 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1457   using AccessKind = AAPointerInfo::AccessKind;
1458   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1459       : AAPointerInfoImpl(IRP, A) {}
1460 
1461   /// Deal with an access and signal if it was handled successfully.
1462   bool handleAccess(Attributor &A, Instruction &I,
1463                     std::optional<Value *> Content, AccessKind Kind,
1464                     SmallVectorImpl<int64_t> &Offsets, ChangeStatus &Changed,
1465                     Type &Ty) {
1466     using namespace AA::PointerInfo;
1467     auto Size = AA::RangeTy::Unknown;
1468     const DataLayout &DL = A.getDataLayout();
1469     TypeSize AccessSize = DL.getTypeStoreSize(&Ty);
1470     if (!AccessSize.isScalable())
1471       Size = AccessSize.getFixedValue();
1472 
1473     // Make a strictly ascending list of offsets as required by addAccess()
1474     llvm::sort(Offsets);
1475     auto *Last = std::unique(Offsets.begin(), Offsets.end());
1476     Offsets.erase(Last, Offsets.end());
1477 
1478     VectorType *VT = dyn_cast<VectorType>(&Ty);
1479     if (!VT || VT->getElementCount().isScalable() ||
1480         !Content.value_or(nullptr) || !isa<Constant>(*Content) ||
1481         (*Content)->getType() != VT ||
1482         DL.getTypeStoreSize(VT->getElementType()).isScalable()) {
1483       Changed = Changed | addAccess(A, {Offsets, Size}, I, Content, Kind, &Ty);
1484     } else {
1485       // Handle vector stores with constant content element-wise.
1486       // TODO: We could look for the elements or create instructions
1487       //       representing them.
1488       // TODO: We need to push the Content into the range abstraction
1489       //       (AA::RangeTy) to allow different content values for different
1490       //       ranges. ranges. Hence, support vectors storing different values.
1491       Type *ElementType = VT->getElementType();
1492       int64_t ElementSize = DL.getTypeStoreSize(ElementType).getFixedValue();
1493       auto *ConstContent = cast<Constant>(*Content);
1494       Type *Int32Ty = Type::getInt32Ty(ElementType->getContext());
1495       SmallVector<int64_t> ElementOffsets(Offsets.begin(), Offsets.end());
1496 
1497       for (int i = 0, e = VT->getElementCount().getFixedValue(); i != e; ++i) {
1498         Value *ElementContent = ConstantExpr::getExtractElement(
1499             ConstContent, ConstantInt::get(Int32Ty, i));
1500 
1501         // Add the element access.
1502         Changed = Changed | addAccess(A, {ElementOffsets, ElementSize}, I,
1503                                       ElementContent, Kind, ElementType);
1504 
1505         // Advance the offsets for the next element.
1506         for (auto &ElementOffset : ElementOffsets)
1507           ElementOffset += ElementSize;
1508       }
1509     }
1510     return true;
1511   };
1512 
1513   /// See AbstractAttribute::updateImpl(...).
1514   ChangeStatus updateImpl(Attributor &A) override;
1515 
1516   /// If the indices to \p GEP can be traced to constants, incorporate all
1517   /// of these into \p UsrOI.
1518   ///
1519   /// \return true iff \p UsrOI is updated.
1520   bool collectConstantsForGEP(Attributor &A, const DataLayout &DL,
1521                               OffsetInfo &UsrOI, const OffsetInfo &PtrOI,
1522                               const GEPOperator *GEP);
1523 
1524   /// See AbstractAttribute::trackStatistics()
1525   void trackStatistics() const override {
1526     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1527   }
1528 };
1529 
1530 bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
1531                                                    const DataLayout &DL,
1532                                                    OffsetInfo &UsrOI,
1533                                                    const OffsetInfo &PtrOI,
1534                                                    const GEPOperator *GEP) {
1535   unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
1536   MapVector<Value *, APInt> VariableOffsets;
1537   APInt ConstantOffset(BitWidth, 0);
1538 
1539   assert(!UsrOI.isUnknown() && !PtrOI.isUnknown() &&
1540          "Don't look for constant values if the offset has already been "
1541          "determined to be unknown.");
1542 
1543   if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
1544     UsrOI.setUnknown();
1545     return true;
1546   }
1547 
1548   LLVM_DEBUG(dbgs() << "[AAPointerInfo] GEP offset is "
1549                     << (VariableOffsets.empty() ? "" : "not") << " constant "
1550                     << *GEP << "\n");
1551 
1552   auto Union = PtrOI;
1553   Union.addToAll(ConstantOffset.getSExtValue());
1554 
1555   // Each VI in VariableOffsets has a set of potential constant values. Every
1556   // combination of elements, picked one each from these sets, is separately
1557   // added to the original set of offsets, thus resulting in more offsets.
1558   for (const auto &VI : VariableOffsets) {
1559     auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
1560         *this, IRPosition::value(*VI.first), DepClassTy::OPTIONAL);
1561     if (!PotentialConstantsAA || !PotentialConstantsAA->isValidState()) {
1562       UsrOI.setUnknown();
1563       return true;
1564     }
1565 
1566     // UndefValue is treated as a zero, which leaves Union as is.
1567     if (PotentialConstantsAA->undefIsContained())
1568       continue;
1569 
1570     // We need at least one constant in every set to compute an actual offset.
1571     // Otherwise, we end up pessimizing AAPointerInfo by respecting offsets that
1572     // don't actually exist. In other words, the absence of constant values
1573     // implies that the operation can be assumed dead for now.
1574     auto &AssumedSet = PotentialConstantsAA->getAssumedSet();
1575     if (AssumedSet.empty())
1576       return false;
1577 
1578     OffsetInfo Product;
1579     for (const auto &ConstOffset : AssumedSet) {
1580       auto CopyPerOffset = Union;
1581       CopyPerOffset.addToAll(ConstOffset.getSExtValue() *
1582                              VI.second.getZExtValue());
1583       Product.merge(CopyPerOffset);
1584     }
1585     Union = Product;
1586   }
1587 
1588   UsrOI = std::move(Union);
1589   return true;
1590 }
1591 
1592 ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
1593   using namespace AA::PointerInfo;
1594   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1595   const DataLayout &DL = A.getDataLayout();
1596   Value &AssociatedValue = getAssociatedValue();
1597 
1598   DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1599   OffsetInfoMap[&AssociatedValue].insert(0);
1600 
1601   auto HandlePassthroughUser = [&](Value *Usr, Value *CurPtr, bool &Follow) {
1602     // One does not simply walk into a map and assign a reference to a possibly
1603     // new location. That can cause an invalidation before the assignment
1604     // happens, like so:
1605     //
1606     //   OffsetInfoMap[Usr] = OffsetInfoMap[CurPtr]; /* bad idea! */
1607     //
1608     // The RHS is a reference that may be invalidated by an insertion caused by
1609     // the LHS. So we ensure that the side-effect of the LHS happens first.
1610     auto &UsrOI = OffsetInfoMap[Usr];
1611     auto &PtrOI = OffsetInfoMap[CurPtr];
1612     assert(!PtrOI.isUnassigned() &&
1613            "Cannot pass through if the input Ptr was not visited!");
1614     UsrOI = PtrOI;
1615     Follow = true;
1616     return true;
1617   };
1618 
1619   const auto *F = getAnchorScope();
1620   const auto *CI =
1621       F ? A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(*F)
1622         : nullptr;
1623   const auto *TLI =
1624       F ? A.getInfoCache().getTargetLibraryInfoForFunction(*F) : nullptr;
1625 
1626   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1627     Value *CurPtr = U.get();
1628     User *Usr = U.getUser();
1629     LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in " << *Usr
1630                       << "\n");
1631     assert(OffsetInfoMap.count(CurPtr) &&
1632            "The current pointer offset should have been seeded!");
1633 
1634     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1635       if (CE->isCast())
1636         return HandlePassthroughUser(Usr, CurPtr, Follow);
1637       if (CE->isCompare())
1638         return true;
1639       if (!isa<GEPOperator>(CE)) {
1640         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1641                           << "\n");
1642         return false;
1643       }
1644     }
1645     if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1646       // Note the order here, the Usr access might change the map, CurPtr is
1647       // already in it though.
1648       auto &UsrOI = OffsetInfoMap[Usr];
1649       auto &PtrOI = OffsetInfoMap[CurPtr];
1650 
1651       if (UsrOI.isUnknown())
1652         return true;
1653 
1654       if (PtrOI.isUnknown()) {
1655         Follow = true;
1656         UsrOI.setUnknown();
1657         return true;
1658       }
1659 
1660       Follow = collectConstantsForGEP(A, DL, UsrOI, PtrOI, GEP);
1661       return true;
1662     }
1663     if (isa<PtrToIntInst>(Usr))
1664       return false;
1665     if (isa<CastInst>(Usr) || isa<SelectInst>(Usr) || isa<ReturnInst>(Usr))
1666       return HandlePassthroughUser(Usr, CurPtr, Follow);
1667 
1668     // For PHIs we need to take care of the recurrence explicitly as the value
1669     // might change while we iterate through a loop. For now, we give up if
1670     // the PHI is not invariant.
1671     if (isa<PHINode>(Usr)) {
1672       // Note the order here, the Usr access might change the map, CurPtr is
1673       // already in it though.
1674       bool IsFirstPHIUser = !OffsetInfoMap.count(Usr);
1675       auto &UsrOI = OffsetInfoMap[Usr];
1676       auto &PtrOI = OffsetInfoMap[CurPtr];
1677 
1678       // Check if the PHI operand has already an unknown offset as we can't
1679       // improve on that anymore.
1680       if (PtrOI.isUnknown()) {
1681         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand offset unknown "
1682                           << *CurPtr << " in " << *Usr << "\n");
1683         Follow = !UsrOI.isUnknown();
1684         UsrOI.setUnknown();
1685         return true;
1686       }
1687 
1688       // Check if the PHI is invariant (so far).
1689       if (UsrOI == PtrOI) {
1690         assert(!PtrOI.isUnassigned() &&
1691                "Cannot assign if the current Ptr was not visited!");
1692         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant (so far)");
1693         return true;
1694       }
1695 
1696       // Check if the PHI operand can be traced back to AssociatedValue.
1697       APInt Offset(
1698           DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1699           0);
1700       Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets(
1701           DL, Offset, /* AllowNonInbounds */ true);
1702       auto It = OffsetInfoMap.find(CurPtrBase);
1703       if (It == OffsetInfoMap.end()) {
1704         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1705                           << *CurPtr << " in " << *Usr << "\n");
1706         UsrOI.setUnknown();
1707         Follow = true;
1708         return true;
1709       }
1710 
1711       // Check if the PHI operand is not dependent on the PHI itself. Every
1712       // recurrence is a cyclic net of PHIs in the data flow, and has an
1713       // equivalent Cycle in the control flow. One of those PHIs must be in the
1714       // header of that control flow Cycle. This is independent of the choice of
1715       // Cycles reported by CycleInfo. It is sufficient to check the PHIs in
1716       // every Cycle header; if such a node is marked unknown, this will
1717       // eventually propagate through the whole net of PHIs in the recurrence.
1718       if (mayBeInCycle(CI, cast<Instruction>(Usr), /* HeaderOnly */ true)) {
1719         auto BaseOI = It->getSecond();
1720         BaseOI.addToAll(Offset.getZExtValue());
1721         if (IsFirstPHIUser || BaseOI == UsrOI) {
1722           LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant " << *CurPtr
1723                             << " in " << *Usr << "\n");
1724           return HandlePassthroughUser(Usr, CurPtr, Follow);
1725         }
1726 
1727         LLVM_DEBUG(
1728             dbgs() << "[AAPointerInfo] PHI operand pointer offset mismatch "
1729                    << *CurPtr << " in " << *Usr << "\n");
1730         UsrOI.setUnknown();
1731         Follow = true;
1732         return true;
1733       }
1734 
1735       UsrOI.merge(PtrOI);
1736       Follow = true;
1737       return true;
1738     }
1739 
1740     if (auto *LoadI = dyn_cast<LoadInst>(Usr)) {
1741       // If the access is to a pointer that may or may not be the associated
1742       // value, e.g. due to a PHI, we cannot assume it will be read.
1743       AccessKind AK = AccessKind::AK_R;
1744       if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1745         AK = AccessKind(AK | AccessKind::AK_MUST);
1746       else
1747         AK = AccessKind(AK | AccessKind::AK_MAY);
1748       if (!handleAccess(A, *LoadI, /* Content */ nullptr, AK,
1749                         OffsetInfoMap[CurPtr].Offsets, Changed,
1750                         *LoadI->getType()))
1751         return false;
1752 
1753       auto IsAssumption = [](Instruction &I) {
1754         if (auto *II = dyn_cast<IntrinsicInst>(&I))
1755           return II->isAssumeLikeIntrinsic();
1756         return false;
1757       };
1758 
1759       auto IsImpactedInRange = [&](Instruction *FromI, Instruction *ToI) {
1760         // Check if the assumption and the load are executed together without
1761         // memory modification.
1762         do {
1763           if (FromI->mayWriteToMemory() && !IsAssumption(*FromI))
1764             return true;
1765           FromI = FromI->getNextNonDebugInstruction();
1766         } while (FromI && FromI != ToI);
1767         return false;
1768       };
1769 
1770       BasicBlock *BB = LoadI->getParent();
1771       auto IsValidAssume = [&](IntrinsicInst &IntrI) {
1772         if (IntrI.getIntrinsicID() != Intrinsic::assume)
1773           return false;
1774         BasicBlock *IntrBB = IntrI.getParent();
1775         if (IntrI.getParent() == BB) {
1776           if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(), &IntrI))
1777             return false;
1778         } else {
1779           auto PredIt = pred_begin(IntrBB);
1780           if (PredIt == pred_end(IntrBB))
1781             return false;
1782           if ((*PredIt) != BB)
1783             return false;
1784           if (++PredIt != pred_end(IntrBB))
1785             return false;
1786           for (auto *SuccBB : successors(BB)) {
1787             if (SuccBB == IntrBB)
1788               continue;
1789             if (isa<UnreachableInst>(SuccBB->getTerminator()))
1790               continue;
1791             return false;
1792           }
1793           if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(),
1794                                 BB->getTerminator()))
1795             return false;
1796           if (IsImpactedInRange(&IntrBB->front(), &IntrI))
1797             return false;
1798         }
1799         return true;
1800       };
1801 
1802       std::pair<Value *, IntrinsicInst *> Assumption;
1803       for (const Use &LoadU : LoadI->uses()) {
1804         if (auto *CmpI = dyn_cast<CmpInst>(LoadU.getUser())) {
1805           if (!CmpI->isEquality() || !CmpI->isTrueWhenEqual())
1806             continue;
1807           for (const Use &CmpU : CmpI->uses()) {
1808             if (auto *IntrI = dyn_cast<IntrinsicInst>(CmpU.getUser())) {
1809               if (!IsValidAssume(*IntrI))
1810                 continue;
1811               int Idx = CmpI->getOperandUse(0) == LoadU;
1812               Assumption = {CmpI->getOperand(Idx), IntrI};
1813               break;
1814             }
1815           }
1816         }
1817         if (Assumption.first)
1818           break;
1819       }
1820 
1821       // Check if we found an assumption associated with this load.
1822       if (!Assumption.first || !Assumption.second)
1823         return true;
1824 
1825       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Assumption found "
1826                         << *Assumption.second << ": " << *LoadI
1827                         << " == " << *Assumption.first << "\n");
1828       bool UsedAssumedInformation = false;
1829       std::optional<Value *> Content = nullptr;
1830       if (Assumption.first)
1831         Content =
1832             A.getAssumedSimplified(*Assumption.first, *this,
1833                                    UsedAssumedInformation, AA::Interprocedural);
1834       return handleAccess(
1835           A, *Assumption.second, Content, AccessKind::AK_ASSUMPTION,
1836           OffsetInfoMap[CurPtr].Offsets, Changed, *LoadI->getType());
1837     }
1838 
1839     auto HandleStoreLike = [&](Instruction &I, Value *ValueOp, Type &ValueTy,
1840                                ArrayRef<Value *> OtherOps, AccessKind AK) {
1841       for (auto *OtherOp : OtherOps) {
1842         if (OtherOp == CurPtr) {
1843           LLVM_DEBUG(
1844               dbgs()
1845               << "[AAPointerInfo] Escaping use in store like instruction " << I
1846               << "\n");
1847           return false;
1848         }
1849       }
1850 
1851       // If the access is to a pointer that may or may not be the associated
1852       // value, e.g. due to a PHI, we cannot assume it will be written.
1853       if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1854         AK = AccessKind(AK | AccessKind::AK_MUST);
1855       else
1856         AK = AccessKind(AK | AccessKind::AK_MAY);
1857       bool UsedAssumedInformation = false;
1858       std::optional<Value *> Content = nullptr;
1859       if (ValueOp)
1860         Content = A.getAssumedSimplified(
1861             *ValueOp, *this, UsedAssumedInformation, AA::Interprocedural);
1862       return handleAccess(A, I, Content, AK, OffsetInfoMap[CurPtr].Offsets,
1863                           Changed, ValueTy);
1864     };
1865 
1866     if (auto *StoreI = dyn_cast<StoreInst>(Usr))
1867       return HandleStoreLike(*StoreI, StoreI->getValueOperand(),
1868                              *StoreI->getValueOperand()->getType(),
1869                              {StoreI->getValueOperand()}, AccessKind::AK_W);
1870     if (auto *RMWI = dyn_cast<AtomicRMWInst>(Usr))
1871       return HandleStoreLike(*RMWI, nullptr, *RMWI->getValOperand()->getType(),
1872                              {RMWI->getValOperand()}, AccessKind::AK_RW);
1873     if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(Usr))
1874       return HandleStoreLike(
1875           *CXI, nullptr, *CXI->getNewValOperand()->getType(),
1876           {CXI->getCompareOperand(), CXI->getNewValOperand()},
1877           AccessKind::AK_RW);
1878 
1879     if (auto *CB = dyn_cast<CallBase>(Usr)) {
1880       if (CB->isLifetimeStartOrEnd())
1881         return true;
1882       if (getFreedOperand(CB, TLI) == U)
1883         return true;
1884       if (CB->isArgOperand(&U)) {
1885         unsigned ArgNo = CB->getArgOperandNo(&U);
1886         const auto *CSArgPI = A.getAAFor<AAPointerInfo>(
1887             *this, IRPosition::callsite_argument(*CB, ArgNo),
1888             DepClassTy::REQUIRED);
1889         if (!CSArgPI)
1890           return false;
1891         Changed =
1892             translateAndAddState(A, *CSArgPI, OffsetInfoMap[CurPtr], *CB) |
1893             Changed;
1894         return isValidState();
1895       }
1896       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1897                         << "\n");
1898       // TODO: Allow some call uses
1899       return false;
1900     }
1901 
1902     LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1903     return false;
1904   };
1905   auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1906     assert(OffsetInfoMap.count(OldU) && "Old use should be known already!");
1907     if (OffsetInfoMap.count(NewU)) {
1908       LLVM_DEBUG({
1909         if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
1910           dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
1911                  << OffsetInfoMap[NewU] << " vs " << OffsetInfoMap[OldU]
1912                  << "\n";
1913         }
1914       });
1915       return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1916     }
1917     OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1918     return true;
1919   };
1920   if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1921                          /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1922                          /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
1923     LLVM_DEBUG(dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
1924     return indicatePessimisticFixpoint();
1925   }
1926 
1927   LLVM_DEBUG({
1928     dbgs() << "Accesses by bin after update:\n";
1929     dumpState(dbgs());
1930   });
1931 
1932   return Changed;
1933 }
1934 
1935 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1936   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1937       : AAPointerInfoImpl(IRP, A) {}
1938 
1939   /// See AbstractAttribute::updateImpl(...).
1940   ChangeStatus updateImpl(Attributor &A) override {
1941     return indicatePessimisticFixpoint();
1942   }
1943 
1944   /// See AbstractAttribute::trackStatistics()
1945   void trackStatistics() const override {
1946     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1947   }
1948 };
1949 
1950 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1951   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1952       : AAPointerInfoFloating(IRP, A) {}
1953 
1954   /// See AbstractAttribute::trackStatistics()
1955   void trackStatistics() const override {
1956     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1957   }
1958 };
1959 
1960 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1961   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1962       : AAPointerInfoFloating(IRP, A) {}
1963 
1964   /// See AbstractAttribute::updateImpl(...).
1965   ChangeStatus updateImpl(Attributor &A) override {
1966     using namespace AA::PointerInfo;
1967     // We handle memory intrinsics explicitly, at least the first (=
1968     // destination) and second (=source) arguments as we know how they are
1969     // accessed.
1970     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1971       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1972       int64_t LengthVal = AA::RangeTy::Unknown;
1973       if (Length)
1974         LengthVal = Length->getSExtValue();
1975       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1976       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1977       if (ArgNo > 1) {
1978         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1979                           << *MI << "\n");
1980         return indicatePessimisticFixpoint();
1981       } else {
1982         auto Kind =
1983             ArgNo == 0 ? AccessKind::AK_MUST_WRITE : AccessKind::AK_MUST_READ;
1984         Changed =
1985             Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind, nullptr);
1986       }
1987       LLVM_DEBUG({
1988         dbgs() << "Accesses by bin after update:\n";
1989         dumpState(dbgs());
1990       });
1991 
1992       return Changed;
1993     }
1994 
1995     // TODO: Once we have call site specific value information we can provide
1996     //       call site specific liveness information and then it makes
1997     //       sense to specialize attributes for call sites arguments instead of
1998     //       redirecting requests to the callee argument.
1999     Argument *Arg = getAssociatedArgument();
2000     if (Arg) {
2001       const IRPosition &ArgPos = IRPosition::argument(*Arg);
2002       auto *ArgAA =
2003           A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
2004       if (ArgAA && ArgAA->getState().isValidState())
2005         return translateAndAddStateFromCallee(A, *ArgAA,
2006                                               *cast<CallBase>(getCtxI()));
2007       if (!Arg->getParent()->isDeclaration())
2008         return indicatePessimisticFixpoint();
2009     }
2010 
2011     bool IsKnownNoCapture;
2012     if (!AA::hasAssumedIRAttr<Attribute::NoCapture>(
2013             A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNoCapture))
2014       return indicatePessimisticFixpoint();
2015 
2016     bool IsKnown = false;
2017     if (AA::isAssumedReadNone(A, getIRPosition(), *this, IsKnown))
2018       return ChangeStatus::UNCHANGED;
2019     bool ReadOnly = AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown);
2020     auto Kind =
2021         ReadOnly ? AccessKind::AK_MAY_READ : AccessKind::AK_MAY_READ_WRITE;
2022     return addAccess(A, AA::RangeTy::getUnknown(), *getCtxI(), nullptr, Kind,
2023                      nullptr);
2024   }
2025 
2026   /// See AbstractAttribute::trackStatistics()
2027   void trackStatistics() const override {
2028     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
2029   }
2030 };
2031 
2032 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
2033   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
2034       : AAPointerInfoFloating(IRP, A) {}
2035 
2036   /// See AbstractAttribute::trackStatistics()
2037   void trackStatistics() const override {
2038     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
2039   }
2040 };
2041 } // namespace
2042 
2043 /// -----------------------NoUnwind Function Attribute--------------------------
2044 
2045 namespace {
2046 struct AANoUnwindImpl : AANoUnwind {
2047   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
2048 
2049   /// See AbstractAttribute::initialize(...).
2050   void initialize(Attributor &A) override {
2051     bool IsKnown;
2052     assert(!AA::hasAssumedIRAttr<Attribute::NoUnwind>(
2053         A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2054     (void)IsKnown;
2055   }
2056 
2057   const std::string getAsStr(Attributor *A) const override {
2058     return getAssumed() ? "nounwind" : "may-unwind";
2059   }
2060 
2061   /// See AbstractAttribute::updateImpl(...).
2062   ChangeStatus updateImpl(Attributor &A) override {
2063     auto Opcodes = {
2064         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
2065         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
2066         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
2067 
2068     auto CheckForNoUnwind = [&](Instruction &I) {
2069       if (!I.mayThrow(/* IncludePhaseOneUnwind */ true))
2070         return true;
2071 
2072       if (const auto *CB = dyn_cast<CallBase>(&I)) {
2073         bool IsKnownNoUnwind;
2074         return AA::hasAssumedIRAttr<Attribute::NoUnwind>(
2075             A, this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED,
2076             IsKnownNoUnwind);
2077       }
2078       return false;
2079     };
2080 
2081     bool UsedAssumedInformation = false;
2082     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
2083                                    UsedAssumedInformation))
2084       return indicatePessimisticFixpoint();
2085 
2086     return ChangeStatus::UNCHANGED;
2087   }
2088 };
2089 
2090 struct AANoUnwindFunction final : public AANoUnwindImpl {
2091   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
2092       : AANoUnwindImpl(IRP, A) {}
2093 
2094   /// See AbstractAttribute::trackStatistics()
2095   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
2096 };
2097 
2098 /// NoUnwind attribute deduction for a call sites.
2099 struct AANoUnwindCallSite final
2100     : AACalleeToCallSite<AANoUnwind, AANoUnwindImpl> {
2101   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
2102       : AACalleeToCallSite<AANoUnwind, AANoUnwindImpl>(IRP, A) {}
2103 
2104   /// See AbstractAttribute::trackStatistics()
2105   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
2106 };
2107 } // namespace
2108 
2109 /// ------------------------ NoSync Function Attribute -------------------------
2110 
2111 bool AANoSync::isAlignedBarrier(const CallBase &CB, bool ExecutedAligned) {
2112   switch (CB.getIntrinsicID()) {
2113   case Intrinsic::nvvm_barrier0:
2114   case Intrinsic::nvvm_barrier0_and:
2115   case Intrinsic::nvvm_barrier0_or:
2116   case Intrinsic::nvvm_barrier0_popc:
2117     return true;
2118   case Intrinsic::amdgcn_s_barrier:
2119     if (ExecutedAligned)
2120       return true;
2121     break;
2122   default:
2123     break;
2124   }
2125   return hasAssumption(CB, KnownAssumptionString("ompx_aligned_barrier"));
2126 }
2127 
2128 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
2129   if (!I->isAtomic())
2130     return false;
2131 
2132   if (auto *FI = dyn_cast<FenceInst>(I))
2133     // All legal orderings for fence are stronger than monotonic.
2134     return FI->getSyncScopeID() != SyncScope::SingleThread;
2135   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
2136     // Unordered is not a legal ordering for cmpxchg.
2137     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
2138             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
2139   }
2140 
2141   AtomicOrdering Ordering;
2142   switch (I->getOpcode()) {
2143   case Instruction::AtomicRMW:
2144     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
2145     break;
2146   case Instruction::Store:
2147     Ordering = cast<StoreInst>(I)->getOrdering();
2148     break;
2149   case Instruction::Load:
2150     Ordering = cast<LoadInst>(I)->getOrdering();
2151     break;
2152   default:
2153     llvm_unreachable(
2154         "New atomic operations need to be known in the attributor.");
2155   }
2156 
2157   return (Ordering != AtomicOrdering::Unordered &&
2158           Ordering != AtomicOrdering::Monotonic);
2159 }
2160 
2161 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2162 /// which would be nosync except that they have a volatile flag.  All other
2163 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2164 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2165   if (auto *MI = dyn_cast<MemIntrinsic>(I))
2166     return !MI->isVolatile();
2167   return false;
2168 }
2169 
2170 namespace {
2171 struct AANoSyncImpl : AANoSync {
2172   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2173 
2174   /// See AbstractAttribute::initialize(...).
2175   void initialize(Attributor &A) override {
2176     bool IsKnown;
2177     assert(!AA::hasAssumedIRAttr<Attribute::NoSync>(A, nullptr, getIRPosition(),
2178                                                     DepClassTy::NONE, IsKnown));
2179     (void)IsKnown;
2180   }
2181 
2182   const std::string getAsStr(Attributor *A) const override {
2183     return getAssumed() ? "nosync" : "may-sync";
2184   }
2185 
2186   /// See AbstractAttribute::updateImpl(...).
2187   ChangeStatus updateImpl(Attributor &A) override;
2188 };
2189 
2190 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2191 
2192   auto CheckRWInstForNoSync = [&](Instruction &I) {
2193     return AA::isNoSyncInst(A, I, *this);
2194   };
2195 
2196   auto CheckForNoSync = [&](Instruction &I) {
2197     // At this point we handled all read/write effects and they are all
2198     // nosync, so they can be skipped.
2199     if (I.mayReadOrWriteMemory())
2200       return true;
2201 
2202     bool IsKnown;
2203     CallBase &CB = cast<CallBase>(I);
2204     if (AA::hasAssumedIRAttr<Attribute::NoSync>(
2205             A, this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL,
2206             IsKnown))
2207       return true;
2208 
2209     // non-convergent and readnone imply nosync.
2210     return !CB.isConvergent();
2211   };
2212 
2213   bool UsedAssumedInformation = false;
2214   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2215                                           UsedAssumedInformation) ||
2216       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2217                                          UsedAssumedInformation))
2218     return indicatePessimisticFixpoint();
2219 
2220   return ChangeStatus::UNCHANGED;
2221 }
2222 
2223 struct AANoSyncFunction final : public AANoSyncImpl {
2224   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2225       : AANoSyncImpl(IRP, A) {}
2226 
2227   /// See AbstractAttribute::trackStatistics()
2228   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2229 };
2230 
2231 /// NoSync attribute deduction for a call sites.
2232 struct AANoSyncCallSite final : AACalleeToCallSite<AANoSync, AANoSyncImpl> {
2233   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2234       : AACalleeToCallSite<AANoSync, AANoSyncImpl>(IRP, A) {}
2235 
2236   /// See AbstractAttribute::trackStatistics()
2237   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2238 };
2239 } // namespace
2240 
2241 /// ------------------------ No-Free Attributes ----------------------------
2242 
2243 namespace {
2244 struct AANoFreeImpl : public AANoFree {
2245   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2246 
2247   /// See AbstractAttribute::initialize(...).
2248   void initialize(Attributor &A) override {
2249     bool IsKnown;
2250     assert(!AA::hasAssumedIRAttr<Attribute::NoFree>(A, nullptr, getIRPosition(),
2251                                                     DepClassTy::NONE, IsKnown));
2252     (void)IsKnown;
2253   }
2254 
2255   /// See AbstractAttribute::updateImpl(...).
2256   ChangeStatus updateImpl(Attributor &A) override {
2257     auto CheckForNoFree = [&](Instruction &I) {
2258       bool IsKnown;
2259       return AA::hasAssumedIRAttr<Attribute::NoFree>(
2260           A, this, IRPosition::callsite_function(cast<CallBase>(I)),
2261           DepClassTy::REQUIRED, IsKnown);
2262     };
2263 
2264     bool UsedAssumedInformation = false;
2265     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2266                                            UsedAssumedInformation))
2267       return indicatePessimisticFixpoint();
2268     return ChangeStatus::UNCHANGED;
2269   }
2270 
2271   /// See AbstractAttribute::getAsStr().
2272   const std::string getAsStr(Attributor *A) const override {
2273     return getAssumed() ? "nofree" : "may-free";
2274   }
2275 };
2276 
2277 struct AANoFreeFunction final : public AANoFreeImpl {
2278   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2279       : AANoFreeImpl(IRP, A) {}
2280 
2281   /// See AbstractAttribute::trackStatistics()
2282   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2283 };
2284 
2285 /// NoFree attribute deduction for a call sites.
2286 struct AANoFreeCallSite final : AACalleeToCallSite<AANoFree, AANoFreeImpl> {
2287   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2288       : AACalleeToCallSite<AANoFree, AANoFreeImpl>(IRP, A) {}
2289 
2290   /// See AbstractAttribute::trackStatistics()
2291   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2292 };
2293 
2294 /// NoFree attribute for floating values.
2295 struct AANoFreeFloating : AANoFreeImpl {
2296   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2297       : AANoFreeImpl(IRP, A) {}
2298 
2299   /// See AbstractAttribute::trackStatistics()
2300   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2301 
2302   /// See Abstract Attribute::updateImpl(...).
2303   ChangeStatus updateImpl(Attributor &A) override {
2304     const IRPosition &IRP = getIRPosition();
2305 
2306     bool IsKnown;
2307     if (AA::hasAssumedIRAttr<Attribute::NoFree>(A, this,
2308                                                 IRPosition::function_scope(IRP),
2309                                                 DepClassTy::OPTIONAL, IsKnown))
2310       return ChangeStatus::UNCHANGED;
2311 
2312     Value &AssociatedValue = getIRPosition().getAssociatedValue();
2313     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2314       Instruction *UserI = cast<Instruction>(U.getUser());
2315       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2316         if (CB->isBundleOperand(&U))
2317           return false;
2318         if (!CB->isArgOperand(&U))
2319           return true;
2320         unsigned ArgNo = CB->getArgOperandNo(&U);
2321 
2322         bool IsKnown;
2323         return AA::hasAssumedIRAttr<Attribute::NoFree>(
2324             A, this, IRPosition::callsite_argument(*CB, ArgNo),
2325             DepClassTy::REQUIRED, IsKnown);
2326       }
2327 
2328       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2329           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2330         Follow = true;
2331         return true;
2332       }
2333       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2334           isa<ReturnInst>(UserI))
2335         return true;
2336 
2337       // Unknown user.
2338       return false;
2339     };
2340     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2341       return indicatePessimisticFixpoint();
2342 
2343     return ChangeStatus::UNCHANGED;
2344   }
2345 };
2346 
2347 /// NoFree attribute for a call site argument.
2348 struct AANoFreeArgument final : AANoFreeFloating {
2349   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2350       : AANoFreeFloating(IRP, A) {}
2351 
2352   /// See AbstractAttribute::trackStatistics()
2353   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2354 };
2355 
2356 /// NoFree attribute for call site arguments.
2357 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2358   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2359       : AANoFreeFloating(IRP, A) {}
2360 
2361   /// See AbstractAttribute::updateImpl(...).
2362   ChangeStatus updateImpl(Attributor &A) override {
2363     // TODO: Once we have call site specific value information we can provide
2364     //       call site specific liveness information and then it makes
2365     //       sense to specialize attributes for call sites arguments instead of
2366     //       redirecting requests to the callee argument.
2367     Argument *Arg = getAssociatedArgument();
2368     if (!Arg)
2369       return indicatePessimisticFixpoint();
2370     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2371     bool IsKnown;
2372     if (AA::hasAssumedIRAttr<Attribute::NoFree>(A, this, ArgPos,
2373                                                 DepClassTy::REQUIRED, IsKnown))
2374       return ChangeStatus::UNCHANGED;
2375     return indicatePessimisticFixpoint();
2376   }
2377 
2378   /// See AbstractAttribute::trackStatistics()
2379   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2380 };
2381 
2382 /// NoFree attribute for function return value.
2383 struct AANoFreeReturned final : AANoFreeFloating {
2384   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2385       : AANoFreeFloating(IRP, A) {
2386     llvm_unreachable("NoFree is not applicable to function returns!");
2387   }
2388 
2389   /// See AbstractAttribute::initialize(...).
2390   void initialize(Attributor &A) override {
2391     llvm_unreachable("NoFree is not applicable to function returns!");
2392   }
2393 
2394   /// See AbstractAttribute::updateImpl(...).
2395   ChangeStatus updateImpl(Attributor &A) override {
2396     llvm_unreachable("NoFree is not applicable to function returns!");
2397   }
2398 
2399   /// See AbstractAttribute::trackStatistics()
2400   void trackStatistics() const override {}
2401 };
2402 
2403 /// NoFree attribute deduction for a call site return value.
2404 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2405   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2406       : AANoFreeFloating(IRP, A) {}
2407 
2408   ChangeStatus manifest(Attributor &A) override {
2409     return ChangeStatus::UNCHANGED;
2410   }
2411   /// See AbstractAttribute::trackStatistics()
2412   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2413 };
2414 } // namespace
2415 
2416 /// ------------------------ NonNull Argument Attribute ------------------------
2417 
2418 bool AANonNull::isImpliedByIR(Attributor &A, const IRPosition &IRP,
2419                               Attribute::AttrKind ImpliedAttributeKind,
2420                               bool IgnoreSubsumingPositions) {
2421   SmallVector<Attribute::AttrKind, 2> AttrKinds;
2422   AttrKinds.push_back(Attribute::NonNull);
2423   if (!NullPointerIsDefined(IRP.getAnchorScope(),
2424                             IRP.getAssociatedType()->getPointerAddressSpace()))
2425     AttrKinds.push_back(Attribute::Dereferenceable);
2426   if (A.hasAttr(IRP, AttrKinds, IgnoreSubsumingPositions, Attribute::NonNull))
2427     return true;
2428 
2429   DominatorTree *DT = nullptr;
2430   AssumptionCache *AC = nullptr;
2431   InformationCache &InfoCache = A.getInfoCache();
2432   if (const Function *Fn = IRP.getAnchorScope()) {
2433     if (!Fn->isDeclaration()) {
2434       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2435       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2436     }
2437   }
2438 
2439   SmallVector<AA::ValueAndContext> Worklist;
2440   if (IRP.getPositionKind() != IRP_RETURNED) {
2441     Worklist.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
2442   } else {
2443     bool UsedAssumedInformation = false;
2444     if (!A.checkForAllInstructions(
2445             [&](Instruction &I) {
2446               Worklist.push_back({*cast<ReturnInst>(I).getReturnValue(), &I});
2447               return true;
2448             },
2449             IRP.getAssociatedFunction(), nullptr, {Instruction::Ret},
2450             UsedAssumedInformation))
2451       return false;
2452   }
2453 
2454   if (llvm::any_of(Worklist, [&](AA::ValueAndContext VAC) {
2455         return !isKnownNonZero(VAC.getValue(), A.getDataLayout(), 0, AC,
2456                                VAC.getCtxI(), DT);
2457       }))
2458     return false;
2459 
2460   A.manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
2461                                        Attribute::NonNull)});
2462   return true;
2463 }
2464 
2465 namespace {
2466 static int64_t getKnownNonNullAndDerefBytesForUse(
2467     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2468     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2469   TrackUse = false;
2470 
2471   const Value *UseV = U->get();
2472   if (!UseV->getType()->isPointerTy())
2473     return 0;
2474 
2475   // We need to follow common pointer manipulation uses to the accesses they
2476   // feed into. We can try to be smart to avoid looking through things we do not
2477   // like for now, e.g., non-inbounds GEPs.
2478   if (isa<CastInst>(I)) {
2479     TrackUse = true;
2480     return 0;
2481   }
2482 
2483   if (isa<GetElementPtrInst>(I)) {
2484     TrackUse = true;
2485     return 0;
2486   }
2487 
2488   Type *PtrTy = UseV->getType();
2489   const Function *F = I->getFunction();
2490   bool NullPointerIsDefined =
2491       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2492   const DataLayout &DL = A.getInfoCache().getDL();
2493   if (const auto *CB = dyn_cast<CallBase>(I)) {
2494     if (CB->isBundleOperand(U)) {
2495       if (RetainedKnowledge RK = getKnowledgeFromUse(
2496               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2497         IsNonNull |=
2498             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2499         return RK.ArgValue;
2500       }
2501       return 0;
2502     }
2503 
2504     if (CB->isCallee(U)) {
2505       IsNonNull |= !NullPointerIsDefined;
2506       return 0;
2507     }
2508 
2509     unsigned ArgNo = CB->getArgOperandNo(U);
2510     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2511     // As long as we only use known information there is no need to track
2512     // dependences here.
2513     bool IsKnownNonNull;
2514     AA::hasAssumedIRAttr<Attribute::NonNull>(A, &QueryingAA, IRP,
2515                                              DepClassTy::NONE, IsKnownNonNull);
2516     IsNonNull |= IsKnownNonNull;
2517     auto *DerefAA =
2518         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2519     return DerefAA ? DerefAA->getKnownDereferenceableBytes() : 0;
2520   }
2521 
2522   std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2523   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() ||
2524       Loc->Size.isScalable() || I->isVolatile())
2525     return 0;
2526 
2527   int64_t Offset;
2528   const Value *Base =
2529       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2530   if (Base && Base == &AssociatedValue) {
2531     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2532     IsNonNull |= !NullPointerIsDefined;
2533     return std::max(int64_t(0), DerefBytes);
2534   }
2535 
2536   /// Corner case when an offset is 0.
2537   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2538                                           /*AllowNonInbounds*/ true);
2539   if (Base && Base == &AssociatedValue && Offset == 0) {
2540     int64_t DerefBytes = Loc->Size.getValue();
2541     IsNonNull |= !NullPointerIsDefined;
2542     return std::max(int64_t(0), DerefBytes);
2543   }
2544 
2545   return 0;
2546 }
2547 
2548 struct AANonNullImpl : AANonNull {
2549   AANonNullImpl(const IRPosition &IRP, Attributor &A) : AANonNull(IRP, A) {}
2550 
2551   /// See AbstractAttribute::initialize(...).
2552   void initialize(Attributor &A) override {
2553     Value &V = *getAssociatedValue().stripPointerCasts();
2554     if (isa<ConstantPointerNull>(V)) {
2555       indicatePessimisticFixpoint();
2556       return;
2557     }
2558 
2559     if (Instruction *CtxI = getCtxI())
2560       followUsesInMBEC(*this, A, getState(), *CtxI);
2561   }
2562 
2563   /// See followUsesInMBEC
2564   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2565                        AANonNull::StateType &State) {
2566     bool IsNonNull = false;
2567     bool TrackUse = false;
2568     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2569                                        IsNonNull, TrackUse);
2570     State.setKnown(IsNonNull);
2571     return TrackUse;
2572   }
2573 
2574   /// See AbstractAttribute::getAsStr().
2575   const std::string getAsStr(Attributor *A) const override {
2576     return getAssumed() ? "nonnull" : "may-null";
2577   }
2578 };
2579 
2580 /// NonNull attribute for a floating value.
2581 struct AANonNullFloating : public AANonNullImpl {
2582   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2583       : AANonNullImpl(IRP, A) {}
2584 
2585   /// See AbstractAttribute::updateImpl(...).
2586   ChangeStatus updateImpl(Attributor &A) override {
2587     auto CheckIRP = [&](const IRPosition &IRP) {
2588       bool IsKnownNonNull;
2589       return AA::hasAssumedIRAttr<Attribute::NonNull>(
2590           A, *this, IRP, DepClassTy::OPTIONAL, IsKnownNonNull);
2591     };
2592 
2593     bool Stripped;
2594     bool UsedAssumedInformation = false;
2595     Value *AssociatedValue = &getAssociatedValue();
2596     SmallVector<AA::ValueAndContext> Values;
2597     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
2598                                       AA::AnyScope, UsedAssumedInformation))
2599       Stripped = false;
2600     else
2601       Stripped =
2602           Values.size() != 1 || Values.front().getValue() != AssociatedValue;
2603 
2604     if (!Stripped) {
2605       bool IsKnown;
2606       if (auto *PHI = dyn_cast<PHINode>(AssociatedValue))
2607         if (llvm::all_of(PHI->incoming_values(), [&](Value *Op) {
2608               return AA::hasAssumedIRAttr<Attribute::NonNull>(
2609                   A, this, IRPosition::value(*Op), DepClassTy::OPTIONAL,
2610                   IsKnown);
2611             }))
2612           return ChangeStatus::UNCHANGED;
2613       if (auto *Select = dyn_cast<SelectInst>(AssociatedValue))
2614         if (AA::hasAssumedIRAttr<Attribute::NonNull>(
2615                 A, this, IRPosition::value(*Select->getFalseValue()),
2616                 DepClassTy::OPTIONAL, IsKnown) &&
2617             AA::hasAssumedIRAttr<Attribute::NonNull>(
2618                 A, this, IRPosition::value(*Select->getTrueValue()),
2619                 DepClassTy::OPTIONAL, IsKnown))
2620           return ChangeStatus::UNCHANGED;
2621 
2622       // If we haven't stripped anything we might still be able to use a
2623       // different AA, but only if the IRP changes. Effectively when we
2624       // interpret this not as a call site value but as a floating/argument
2625       // value.
2626       const IRPosition AVIRP = IRPosition::value(*AssociatedValue);
2627       if (AVIRP == getIRPosition() || !CheckIRP(AVIRP))
2628         return indicatePessimisticFixpoint();
2629       return ChangeStatus::UNCHANGED;
2630     }
2631 
2632     for (const auto &VAC : Values)
2633       if (!CheckIRP(IRPosition::value(*VAC.getValue())))
2634         return indicatePessimisticFixpoint();
2635 
2636     return ChangeStatus::UNCHANGED;
2637   }
2638 
2639   /// See AbstractAttribute::trackStatistics()
2640   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2641 };
2642 
2643 /// NonNull attribute for function return value.
2644 struct AANonNullReturned final
2645     : AAReturnedFromReturnedValues<AANonNull, AANonNull, AANonNull::StateType,
2646                                    false, AANonNull::IRAttributeKind, false> {
2647   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2648       : AAReturnedFromReturnedValues<AANonNull, AANonNull, AANonNull::StateType,
2649                                      false, Attribute::NonNull, false>(IRP, A) {
2650   }
2651 
2652   /// See AbstractAttribute::getAsStr().
2653   const std::string getAsStr(Attributor *A) const override {
2654     return getAssumed() ? "nonnull" : "may-null";
2655   }
2656 
2657   /// See AbstractAttribute::trackStatistics()
2658   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2659 };
2660 
2661 /// NonNull attribute for function argument.
2662 struct AANonNullArgument final
2663     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2664   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2665       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2666 
2667   /// See AbstractAttribute::trackStatistics()
2668   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2669 };
2670 
2671 struct AANonNullCallSiteArgument final : AANonNullFloating {
2672   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2673       : AANonNullFloating(IRP, A) {}
2674 
2675   /// See AbstractAttribute::trackStatistics()
2676   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2677 };
2678 
2679 /// NonNull attribute for a call site return position.
2680 struct AANonNullCallSiteReturned final
2681     : AACalleeToCallSite<AANonNull, AANonNullImpl> {
2682   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2683       : AACalleeToCallSite<AANonNull, AANonNullImpl>(IRP, A) {}
2684 
2685   /// See AbstractAttribute::trackStatistics()
2686   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2687 };
2688 } // namespace
2689 
2690 /// ------------------------ Must-Progress Attributes --------------------------
2691 namespace {
2692 struct AAMustProgressImpl : public AAMustProgress {
2693   AAMustProgressImpl(const IRPosition &IRP, Attributor &A)
2694       : AAMustProgress(IRP, A) {}
2695 
2696   /// See AbstractAttribute::initialize(...).
2697   void initialize(Attributor &A) override {
2698     bool IsKnown;
2699     assert(!AA::hasAssumedIRAttr<Attribute::MustProgress>(
2700         A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2701     (void)IsKnown;
2702   }
2703 
2704   /// See AbstractAttribute::getAsStr()
2705   const std::string getAsStr(Attributor *A) const override {
2706     return getAssumed() ? "mustprogress" : "may-not-progress";
2707   }
2708 };
2709 
2710 struct AAMustProgressFunction final : AAMustProgressImpl {
2711   AAMustProgressFunction(const IRPosition &IRP, Attributor &A)
2712       : AAMustProgressImpl(IRP, A) {}
2713 
2714   /// See AbstractAttribute::updateImpl(...).
2715   ChangeStatus updateImpl(Attributor &A) override {
2716     bool IsKnown;
2717     if (AA::hasAssumedIRAttr<Attribute::WillReturn>(
2718             A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnown)) {
2719       if (IsKnown)
2720         return indicateOptimisticFixpoint();
2721       return ChangeStatus::UNCHANGED;
2722     }
2723 
2724     auto CheckForMustProgress = [&](AbstractCallSite ACS) {
2725       IRPosition IPos = IRPosition::callsite_function(*ACS.getInstruction());
2726       bool IsKnownMustProgress;
2727       return AA::hasAssumedIRAttr<Attribute::MustProgress>(
2728           A, this, IPos, DepClassTy::REQUIRED, IsKnownMustProgress,
2729           /* IgnoreSubsumingPositions */ true);
2730     };
2731 
2732     bool AllCallSitesKnown = true;
2733     if (!A.checkForAllCallSites(CheckForMustProgress, *this,
2734                                 /* RequireAllCallSites */ true,
2735                                 AllCallSitesKnown))
2736       return indicatePessimisticFixpoint();
2737 
2738     return ChangeStatus::UNCHANGED;
2739   }
2740 
2741   /// See AbstractAttribute::trackStatistics()
2742   void trackStatistics() const override {
2743     STATS_DECLTRACK_FN_ATTR(mustprogress)
2744   }
2745 };
2746 
2747 /// MustProgress attribute deduction for a call sites.
2748 struct AAMustProgressCallSite final : AAMustProgressImpl {
2749   AAMustProgressCallSite(const IRPosition &IRP, Attributor &A)
2750       : AAMustProgressImpl(IRP, A) {}
2751 
2752   /// See AbstractAttribute::updateImpl(...).
2753   ChangeStatus updateImpl(Attributor &A) override {
2754     // TODO: Once we have call site specific value information we can provide
2755     //       call site specific liveness information and then it makes
2756     //       sense to specialize attributes for call sites arguments instead of
2757     //       redirecting requests to the callee argument.
2758     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
2759     bool IsKnownMustProgress;
2760     if (!AA::hasAssumedIRAttr<Attribute::MustProgress>(
2761             A, this, FnPos, DepClassTy::REQUIRED, IsKnownMustProgress))
2762       return indicatePessimisticFixpoint();
2763     return ChangeStatus::UNCHANGED;
2764   }
2765 
2766   /// See AbstractAttribute::trackStatistics()
2767   void trackStatistics() const override {
2768     STATS_DECLTRACK_CS_ATTR(mustprogress);
2769   }
2770 };
2771 } // namespace
2772 
2773 /// ------------------------ No-Recurse Attributes ----------------------------
2774 
2775 namespace {
2776 struct AANoRecurseImpl : public AANoRecurse {
2777   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2778 
2779   /// See AbstractAttribute::initialize(...).
2780   void initialize(Attributor &A) override {
2781     bool IsKnown;
2782     assert(!AA::hasAssumedIRAttr<Attribute::NoRecurse>(
2783         A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2784     (void)IsKnown;
2785   }
2786 
2787   /// See AbstractAttribute::getAsStr()
2788   const std::string getAsStr(Attributor *A) const override {
2789     return getAssumed() ? "norecurse" : "may-recurse";
2790   }
2791 };
2792 
2793 struct AANoRecurseFunction final : AANoRecurseImpl {
2794   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2795       : AANoRecurseImpl(IRP, A) {}
2796 
2797   /// See AbstractAttribute::updateImpl(...).
2798   ChangeStatus updateImpl(Attributor &A) override {
2799 
2800     // If all live call sites are known to be no-recurse, we are as well.
2801     auto CallSitePred = [&](AbstractCallSite ACS) {
2802       bool IsKnownNoRecurse;
2803       if (!AA::hasAssumedIRAttr<Attribute::NoRecurse>(
2804               A, this,
2805               IRPosition::function(*ACS.getInstruction()->getFunction()),
2806               DepClassTy::NONE, IsKnownNoRecurse))
2807         return false;
2808       return IsKnownNoRecurse;
2809     };
2810     bool UsedAssumedInformation = false;
2811     if (A.checkForAllCallSites(CallSitePred, *this, true,
2812                                UsedAssumedInformation)) {
2813       // If we know all call sites and all are known no-recurse, we are done.
2814       // If all known call sites, which might not be all that exist, are known
2815       // to be no-recurse, we are not done but we can continue to assume
2816       // no-recurse. If one of the call sites we have not visited will become
2817       // live, another update is triggered.
2818       if (!UsedAssumedInformation)
2819         indicateOptimisticFixpoint();
2820       return ChangeStatus::UNCHANGED;
2821     }
2822 
2823     const AAInterFnReachability *EdgeReachability =
2824         A.getAAFor<AAInterFnReachability>(*this, getIRPosition(),
2825                                           DepClassTy::REQUIRED);
2826     if (EdgeReachability && EdgeReachability->canReach(A, *getAnchorScope()))
2827       return indicatePessimisticFixpoint();
2828     return ChangeStatus::UNCHANGED;
2829   }
2830 
2831   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2832 };
2833 
2834 /// NoRecurse attribute deduction for a call sites.
2835 struct AANoRecurseCallSite final
2836     : AACalleeToCallSite<AANoRecurse, AANoRecurseImpl> {
2837   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2838       : AACalleeToCallSite<AANoRecurse, AANoRecurseImpl>(IRP, A) {}
2839 
2840   /// See AbstractAttribute::trackStatistics()
2841   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2842 };
2843 } // namespace
2844 
2845 /// ------------------------ No-Convergent Attribute --------------------------
2846 
2847 namespace {
2848 struct AANonConvergentImpl : public AANonConvergent {
2849   AANonConvergentImpl(const IRPosition &IRP, Attributor &A)
2850       : AANonConvergent(IRP, A) {}
2851 
2852   /// See AbstractAttribute::getAsStr()
2853   const std::string getAsStr(Attributor *A) const override {
2854     return getAssumed() ? "non-convergent" : "may-be-convergent";
2855   }
2856 };
2857 
2858 struct AANonConvergentFunction final : AANonConvergentImpl {
2859   AANonConvergentFunction(const IRPosition &IRP, Attributor &A)
2860       : AANonConvergentImpl(IRP, A) {}
2861 
2862   /// See AbstractAttribute::updateImpl(...).
2863   ChangeStatus updateImpl(Attributor &A) override {
2864     // If all function calls are known to not be convergent, we are not
2865     // convergent.
2866     auto CalleeIsNotConvergent = [&](Instruction &Inst) {
2867       CallBase &CB = cast<CallBase>(Inst);
2868       auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
2869       if (!Callee || Callee->isIntrinsic()) {
2870         return false;
2871       }
2872       if (Callee->isDeclaration()) {
2873         return !Callee->hasFnAttribute(Attribute::Convergent);
2874       }
2875       const auto *ConvergentAA = A.getAAFor<AANonConvergent>(
2876           *this, IRPosition::function(*Callee), DepClassTy::REQUIRED);
2877       return ConvergentAA && ConvergentAA->isAssumedNotConvergent();
2878     };
2879 
2880     bool UsedAssumedInformation = false;
2881     if (!A.checkForAllCallLikeInstructions(CalleeIsNotConvergent, *this,
2882                                            UsedAssumedInformation)) {
2883       return indicatePessimisticFixpoint();
2884     }
2885     return ChangeStatus::UNCHANGED;
2886   }
2887 
2888   ChangeStatus manifest(Attributor &A) override {
2889     if (isKnownNotConvergent() &&
2890         A.hasAttr(getIRPosition(), Attribute::Convergent)) {
2891       A.removeAttrs(getIRPosition(), {Attribute::Convergent});
2892       return ChangeStatus::CHANGED;
2893     }
2894     return ChangeStatus::UNCHANGED;
2895   }
2896 
2897   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(convergent) }
2898 };
2899 } // namespace
2900 
2901 /// -------------------- Undefined-Behavior Attributes ------------------------
2902 
2903 namespace {
2904 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2905   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2906       : AAUndefinedBehavior(IRP, A) {}
2907 
2908   /// See AbstractAttribute::updateImpl(...).
2909   // through a pointer (i.e. also branches etc.)
2910   ChangeStatus updateImpl(Attributor &A) override {
2911     const size_t UBPrevSize = KnownUBInsts.size();
2912     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2913 
2914     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2915       // Lang ref now states volatile store is not UB, let's skip them.
2916       if (I.isVolatile() && I.mayWriteToMemory())
2917         return true;
2918 
2919       // Skip instructions that are already saved.
2920       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2921         return true;
2922 
2923       // If we reach here, we know we have an instruction
2924       // that accesses memory through a pointer operand,
2925       // for which getPointerOperand() should give it to us.
2926       Value *PtrOp =
2927           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2928       assert(PtrOp &&
2929              "Expected pointer operand of memory accessing instruction");
2930 
2931       // Either we stopped and the appropriate action was taken,
2932       // or we got back a simplified value to continue.
2933       std::optional<Value *> SimplifiedPtrOp =
2934           stopOnUndefOrAssumed(A, PtrOp, &I);
2935       if (!SimplifiedPtrOp || !*SimplifiedPtrOp)
2936         return true;
2937       const Value *PtrOpVal = *SimplifiedPtrOp;
2938 
2939       // A memory access through a pointer is considered UB
2940       // only if the pointer has constant null value.
2941       // TODO: Expand it to not only check constant values.
2942       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2943         AssumedNoUBInsts.insert(&I);
2944         return true;
2945       }
2946       const Type *PtrTy = PtrOpVal->getType();
2947 
2948       // Because we only consider instructions inside functions,
2949       // assume that a parent function exists.
2950       const Function *F = I.getFunction();
2951 
2952       // A memory access using constant null pointer is only considered UB
2953       // if null pointer is _not_ defined for the target platform.
2954       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2955         AssumedNoUBInsts.insert(&I);
2956       else
2957         KnownUBInsts.insert(&I);
2958       return true;
2959     };
2960 
2961     auto InspectBrInstForUB = [&](Instruction &I) {
2962       // A conditional branch instruction is considered UB if it has `undef`
2963       // condition.
2964 
2965       // Skip instructions that are already saved.
2966       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2967         return true;
2968 
2969       // We know we have a branch instruction.
2970       auto *BrInst = cast<BranchInst>(&I);
2971 
2972       // Unconditional branches are never considered UB.
2973       if (BrInst->isUnconditional())
2974         return true;
2975 
2976       // Either we stopped and the appropriate action was taken,
2977       // or we got back a simplified value to continue.
2978       std::optional<Value *> SimplifiedCond =
2979           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2980       if (!SimplifiedCond || !*SimplifiedCond)
2981         return true;
2982       AssumedNoUBInsts.insert(&I);
2983       return true;
2984     };
2985 
2986     auto InspectCallSiteForUB = [&](Instruction &I) {
2987       // Check whether a callsite always cause UB or not
2988 
2989       // Skip instructions that are already saved.
2990       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2991         return true;
2992 
2993       // Check nonnull and noundef argument attribute violation for each
2994       // callsite.
2995       CallBase &CB = cast<CallBase>(I);
2996       auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
2997       if (!Callee)
2998         return true;
2999       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
3000         // If current argument is known to be simplified to null pointer and the
3001         // corresponding argument position is known to have nonnull attribute,
3002         // the argument is poison. Furthermore, if the argument is poison and
3003         // the position is known to have noundef attriubte, this callsite is
3004         // considered UB.
3005         if (idx >= Callee->arg_size())
3006           break;
3007         Value *ArgVal = CB.getArgOperand(idx);
3008         if (!ArgVal)
3009           continue;
3010         // Here, we handle three cases.
3011         //   (1) Not having a value means it is dead. (we can replace the value
3012         //       with undef)
3013         //   (2) Simplified to undef. The argument violate noundef attriubte.
3014         //   (3) Simplified to null pointer where known to be nonnull.
3015         //       The argument is a poison value and violate noundef attribute.
3016         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
3017         bool IsKnownNoUndef;
3018         AA::hasAssumedIRAttr<Attribute::NoUndef>(
3019             A, this, CalleeArgumentIRP, DepClassTy::NONE, IsKnownNoUndef);
3020         if (!IsKnownNoUndef)
3021           continue;
3022         bool UsedAssumedInformation = false;
3023         std::optional<Value *> SimplifiedVal =
3024             A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
3025                                    UsedAssumedInformation, AA::Interprocedural);
3026         if (UsedAssumedInformation)
3027           continue;
3028         if (SimplifiedVal && !*SimplifiedVal)
3029           return true;
3030         if (!SimplifiedVal || isa<UndefValue>(**SimplifiedVal)) {
3031           KnownUBInsts.insert(&I);
3032           continue;
3033         }
3034         if (!ArgVal->getType()->isPointerTy() ||
3035             !isa<ConstantPointerNull>(**SimplifiedVal))
3036           continue;
3037         bool IsKnownNonNull;
3038         AA::hasAssumedIRAttr<Attribute::NonNull>(
3039             A, this, CalleeArgumentIRP, DepClassTy::NONE, IsKnownNonNull);
3040         if (IsKnownNonNull)
3041           KnownUBInsts.insert(&I);
3042       }
3043       return true;
3044     };
3045 
3046     auto InspectReturnInstForUB = [&](Instruction &I) {
3047       auto &RI = cast<ReturnInst>(I);
3048       // Either we stopped and the appropriate action was taken,
3049       // or we got back a simplified return value to continue.
3050       std::optional<Value *> SimplifiedRetValue =
3051           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
3052       if (!SimplifiedRetValue || !*SimplifiedRetValue)
3053         return true;
3054 
3055       // Check if a return instruction always cause UB or not
3056       // Note: It is guaranteed that the returned position of the anchor
3057       //       scope has noundef attribute when this is called.
3058       //       We also ensure the return position is not "assumed dead"
3059       //       because the returned value was then potentially simplified to
3060       //       `undef` in AAReturnedValues without removing the `noundef`
3061       //       attribute yet.
3062 
3063       // When the returned position has noundef attriubte, UB occurs in the
3064       // following cases.
3065       //   (1) Returned value is known to be undef.
3066       //   (2) The value is known to be a null pointer and the returned
3067       //       position has nonnull attribute (because the returned value is
3068       //       poison).
3069       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
3070         bool IsKnownNonNull;
3071         AA::hasAssumedIRAttr<Attribute::NonNull>(
3072             A, this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE,
3073             IsKnownNonNull);
3074         if (IsKnownNonNull)
3075           KnownUBInsts.insert(&I);
3076       }
3077 
3078       return true;
3079     };
3080 
3081     bool UsedAssumedInformation = false;
3082     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
3083                               {Instruction::Load, Instruction::Store,
3084                                Instruction::AtomicCmpXchg,
3085                                Instruction::AtomicRMW},
3086                               UsedAssumedInformation,
3087                               /* CheckBBLivenessOnly */ true);
3088     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
3089                               UsedAssumedInformation,
3090                               /* CheckBBLivenessOnly */ true);
3091     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
3092                                       UsedAssumedInformation);
3093 
3094     // If the returned position of the anchor scope has noundef attriubte, check
3095     // all returned instructions.
3096     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
3097       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
3098       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
3099         bool IsKnownNoUndef;
3100         AA::hasAssumedIRAttr<Attribute::NoUndef>(
3101             A, this, ReturnIRP, DepClassTy::NONE, IsKnownNoUndef);
3102         if (IsKnownNoUndef)
3103           A.checkForAllInstructions(InspectReturnInstForUB, *this,
3104                                     {Instruction::Ret}, UsedAssumedInformation,
3105                                     /* CheckBBLivenessOnly */ true);
3106       }
3107     }
3108 
3109     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
3110         UBPrevSize != KnownUBInsts.size())
3111       return ChangeStatus::CHANGED;
3112     return ChangeStatus::UNCHANGED;
3113   }
3114 
3115   bool isKnownToCauseUB(Instruction *I) const override {
3116     return KnownUBInsts.count(I);
3117   }
3118 
3119   bool isAssumedToCauseUB(Instruction *I) const override {
3120     // In simple words, if an instruction is not in the assumed to _not_
3121     // cause UB, then it is assumed UB (that includes those
3122     // in the KnownUBInsts set). The rest is boilerplate
3123     // is to ensure that it is one of the instructions we test
3124     // for UB.
3125 
3126     switch (I->getOpcode()) {
3127     case Instruction::Load:
3128     case Instruction::Store:
3129     case Instruction::AtomicCmpXchg:
3130     case Instruction::AtomicRMW:
3131       return !AssumedNoUBInsts.count(I);
3132     case Instruction::Br: {
3133       auto *BrInst = cast<BranchInst>(I);
3134       if (BrInst->isUnconditional())
3135         return false;
3136       return !AssumedNoUBInsts.count(I);
3137     } break;
3138     default:
3139       return false;
3140     }
3141     return false;
3142   }
3143 
3144   ChangeStatus manifest(Attributor &A) override {
3145     if (KnownUBInsts.empty())
3146       return ChangeStatus::UNCHANGED;
3147     for (Instruction *I : KnownUBInsts)
3148       A.changeToUnreachableAfterManifest(I);
3149     return ChangeStatus::CHANGED;
3150   }
3151 
3152   /// See AbstractAttribute::getAsStr()
3153   const std::string getAsStr(Attributor *A) const override {
3154     return getAssumed() ? "undefined-behavior" : "no-ub";
3155   }
3156 
3157   /// Note: The correctness of this analysis depends on the fact that the
3158   /// following 2 sets will stop changing after some point.
3159   /// "Change" here means that their size changes.
3160   /// The size of each set is monotonically increasing
3161   /// (we only add items to them) and it is upper bounded by the number of
3162   /// instructions in the processed function (we can never save more
3163   /// elements in either set than this number). Hence, at some point,
3164   /// they will stop increasing.
3165   /// Consequently, at some point, both sets will have stopped
3166   /// changing, effectively making the analysis reach a fixpoint.
3167 
3168   /// Note: These 2 sets are disjoint and an instruction can be considered
3169   /// one of 3 things:
3170   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
3171   ///    the KnownUBInsts set.
3172   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
3173   ///    has a reason to assume it).
3174   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
3175   ///    could not find a reason to assume or prove that it can cause UB,
3176   ///    hence it assumes it doesn't. We have a set for these instructions
3177   ///    so that we don't reprocess them in every update.
3178   ///    Note however that instructions in this set may cause UB.
3179 
3180 protected:
3181   /// A set of all live instructions _known_ to cause UB.
3182   SmallPtrSet<Instruction *, 8> KnownUBInsts;
3183 
3184 private:
3185   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
3186   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
3187 
3188   // Should be called on updates in which if we're processing an instruction
3189   // \p I that depends on a value \p V, one of the following has to happen:
3190   // - If the value is assumed, then stop.
3191   // - If the value is known but undef, then consider it UB.
3192   // - Otherwise, do specific processing with the simplified value.
3193   // We return std::nullopt in the first 2 cases to signify that an appropriate
3194   // action was taken and the caller should stop.
3195   // Otherwise, we return the simplified value that the caller should
3196   // use for specific processing.
3197   std::optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
3198                                               Instruction *I) {
3199     bool UsedAssumedInformation = false;
3200     std::optional<Value *> SimplifiedV =
3201         A.getAssumedSimplified(IRPosition::value(*V), *this,
3202                                UsedAssumedInformation, AA::Interprocedural);
3203     if (!UsedAssumedInformation) {
3204       // Don't depend on assumed values.
3205       if (!SimplifiedV) {
3206         // If it is known (which we tested above) but it doesn't have a value,
3207         // then we can assume `undef` and hence the instruction is UB.
3208         KnownUBInsts.insert(I);
3209         return std::nullopt;
3210       }
3211       if (!*SimplifiedV)
3212         return nullptr;
3213       V = *SimplifiedV;
3214     }
3215     if (isa<UndefValue>(V)) {
3216       KnownUBInsts.insert(I);
3217       return std::nullopt;
3218     }
3219     return V;
3220   }
3221 };
3222 
3223 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
3224   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
3225       : AAUndefinedBehaviorImpl(IRP, A) {}
3226 
3227   /// See AbstractAttribute::trackStatistics()
3228   void trackStatistics() const override {
3229     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
3230                "Number of instructions known to have UB");
3231     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
3232         KnownUBInsts.size();
3233   }
3234 };
3235 } // namespace
3236 
3237 /// ------------------------ Will-Return Attributes ----------------------------
3238 
3239 namespace {
3240 // Helper function that checks whether a function has any cycle which we don't
3241 // know if it is bounded or not.
3242 // Loops with maximum trip count are considered bounded, any other cycle not.
3243 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
3244   ScalarEvolution *SE =
3245       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
3246   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
3247   // If either SCEV or LoopInfo is not available for the function then we assume
3248   // any cycle to be unbounded cycle.
3249   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
3250   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
3251   if (!SE || !LI) {
3252     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
3253       if (SCCI.hasCycle())
3254         return true;
3255     return false;
3256   }
3257 
3258   // If there's irreducible control, the function may contain non-loop cycles.
3259   if (mayContainIrreducibleControl(F, LI))
3260     return true;
3261 
3262   // Any loop that does not have a max trip count is considered unbounded cycle.
3263   for (auto *L : LI->getLoopsInPreorder()) {
3264     if (!SE->getSmallConstantMaxTripCount(L))
3265       return true;
3266   }
3267   return false;
3268 }
3269 
3270 struct AAWillReturnImpl : public AAWillReturn {
3271   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
3272       : AAWillReturn(IRP, A) {}
3273 
3274   /// See AbstractAttribute::initialize(...).
3275   void initialize(Attributor &A) override {
3276     bool IsKnown;
3277     assert(!AA::hasAssumedIRAttr<Attribute::WillReturn>(
3278         A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
3279     (void)IsKnown;
3280   }
3281 
3282   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
3283   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
3284     if (!A.hasAttr(getIRPosition(), {Attribute::MustProgress}))
3285       return false;
3286 
3287     bool IsKnown;
3288     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3289       return IsKnown || !KnownOnly;
3290     return false;
3291   }
3292 
3293   /// See AbstractAttribute::updateImpl(...).
3294   ChangeStatus updateImpl(Attributor &A) override {
3295     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3296       return ChangeStatus::UNCHANGED;
3297 
3298     auto CheckForWillReturn = [&](Instruction &I) {
3299       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
3300       bool IsKnown;
3301       if (AA::hasAssumedIRAttr<Attribute::WillReturn>(
3302               A, this, IPos, DepClassTy::REQUIRED, IsKnown)) {
3303         if (IsKnown)
3304           return true;
3305       } else {
3306         return false;
3307       }
3308       bool IsKnownNoRecurse;
3309       return AA::hasAssumedIRAttr<Attribute::NoRecurse>(
3310           A, this, IPos, DepClassTy::REQUIRED, IsKnownNoRecurse);
3311     };
3312 
3313     bool UsedAssumedInformation = false;
3314     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3315                                            UsedAssumedInformation))
3316       return indicatePessimisticFixpoint();
3317 
3318     return ChangeStatus::UNCHANGED;
3319   }
3320 
3321   /// See AbstractAttribute::getAsStr()
3322   const std::string getAsStr(Attributor *A) const override {
3323     return getAssumed() ? "willreturn" : "may-noreturn";
3324   }
3325 };
3326 
3327 struct AAWillReturnFunction final : AAWillReturnImpl {
3328   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3329       : AAWillReturnImpl(IRP, A) {}
3330 
3331   /// See AbstractAttribute::initialize(...).
3332   void initialize(Attributor &A) override {
3333     AAWillReturnImpl::initialize(A);
3334 
3335     Function *F = getAnchorScope();
3336     assert(F && "Did expect an anchor function");
3337     if (F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3338       indicatePessimisticFixpoint();
3339   }
3340 
3341   /// See AbstractAttribute::trackStatistics()
3342   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3343 };
3344 
3345 /// WillReturn attribute deduction for a call sites.
3346 struct AAWillReturnCallSite final
3347     : AACalleeToCallSite<AAWillReturn, AAWillReturnImpl> {
3348   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3349       : AACalleeToCallSite<AAWillReturn, AAWillReturnImpl>(IRP, A) {}
3350 
3351   /// See AbstractAttribute::updateImpl(...).
3352   ChangeStatus updateImpl(Attributor &A) override {
3353     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3354       return ChangeStatus::UNCHANGED;
3355 
3356     return AACalleeToCallSite::updateImpl(A);
3357   }
3358 
3359   /// See AbstractAttribute::trackStatistics()
3360   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3361 };
3362 } // namespace
3363 
3364 /// -------------------AAIntraFnReachability Attribute--------------------------
3365 
3366 /// All information associated with a reachability query. This boilerplate code
3367 /// is used by both AAIntraFnReachability and AAInterFnReachability, with
3368 /// different \p ToTy values.
3369 template <typename ToTy> struct ReachabilityQueryInfo {
3370   enum class Reachable {
3371     No,
3372     Yes,
3373   };
3374 
3375   /// Start here,
3376   const Instruction *From = nullptr;
3377   /// reach this place,
3378   const ToTy *To = nullptr;
3379   /// without going through any of these instructions,
3380   const AA::InstExclusionSetTy *ExclusionSet = nullptr;
3381   /// and remember if it worked:
3382   Reachable Result = Reachable::No;
3383 
3384   /// Precomputed hash for this RQI.
3385   unsigned Hash = 0;
3386 
3387   unsigned computeHashValue() const {
3388     assert(Hash == 0 && "Computed hash twice!");
3389     using InstSetDMI = DenseMapInfo<const AA::InstExclusionSetTy *>;
3390     using PairDMI = DenseMapInfo<std::pair<const Instruction *, const ToTy *>>;
3391     return const_cast<ReachabilityQueryInfo<ToTy> *>(this)->Hash =
3392                detail::combineHashValue(PairDMI ::getHashValue({From, To}),
3393                                         InstSetDMI::getHashValue(ExclusionSet));
3394   }
3395 
3396   ReachabilityQueryInfo(const Instruction *From, const ToTy *To)
3397       : From(From), To(To) {}
3398 
3399   /// Constructor replacement to ensure unique and stable sets are used for the
3400   /// cache.
3401   ReachabilityQueryInfo(Attributor &A, const Instruction &From, const ToTy &To,
3402                         const AA::InstExclusionSetTy *ES, bool MakeUnique)
3403       : From(&From), To(&To), ExclusionSet(ES) {
3404 
3405     if (!ES || ES->empty()) {
3406       ExclusionSet = nullptr;
3407     } else if (MakeUnique) {
3408       ExclusionSet = A.getInfoCache().getOrCreateUniqueBlockExecutionSet(ES);
3409     }
3410   }
3411 
3412   ReachabilityQueryInfo(const ReachabilityQueryInfo &RQI)
3413       : From(RQI.From), To(RQI.To), ExclusionSet(RQI.ExclusionSet) {}
3414 };
3415 
3416 namespace llvm {
3417 template <typename ToTy> struct DenseMapInfo<ReachabilityQueryInfo<ToTy> *> {
3418   using InstSetDMI = DenseMapInfo<const AA::InstExclusionSetTy *>;
3419   using PairDMI = DenseMapInfo<std::pair<const Instruction *, const ToTy *>>;
3420 
3421   static ReachabilityQueryInfo<ToTy> EmptyKey;
3422   static ReachabilityQueryInfo<ToTy> TombstoneKey;
3423 
3424   static inline ReachabilityQueryInfo<ToTy> *getEmptyKey() { return &EmptyKey; }
3425   static inline ReachabilityQueryInfo<ToTy> *getTombstoneKey() {
3426     return &TombstoneKey;
3427   }
3428   static unsigned getHashValue(const ReachabilityQueryInfo<ToTy> *RQI) {
3429     return RQI->Hash ? RQI->Hash : RQI->computeHashValue();
3430   }
3431   static bool isEqual(const ReachabilityQueryInfo<ToTy> *LHS,
3432                       const ReachabilityQueryInfo<ToTy> *RHS) {
3433     if (!PairDMI::isEqual({LHS->From, LHS->To}, {RHS->From, RHS->To}))
3434       return false;
3435     return InstSetDMI::isEqual(LHS->ExclusionSet, RHS->ExclusionSet);
3436   }
3437 };
3438 
3439 #define DefineKeys(ToTy)                                                       \
3440   template <>                                                                  \
3441   ReachabilityQueryInfo<ToTy>                                                  \
3442       DenseMapInfo<ReachabilityQueryInfo<ToTy> *>::EmptyKey =                  \
3443           ReachabilityQueryInfo<ToTy>(                                         \
3444               DenseMapInfo<const Instruction *>::getEmptyKey(),                \
3445               DenseMapInfo<const ToTy *>::getEmptyKey());                      \
3446   template <>                                                                  \
3447   ReachabilityQueryInfo<ToTy>                                                  \
3448       DenseMapInfo<ReachabilityQueryInfo<ToTy> *>::TombstoneKey =              \
3449           ReachabilityQueryInfo<ToTy>(                                         \
3450               DenseMapInfo<const Instruction *>::getTombstoneKey(),            \
3451               DenseMapInfo<const ToTy *>::getTombstoneKey());
3452 
3453 DefineKeys(Instruction) DefineKeys(Function)
3454 #undef DefineKeys
3455 
3456 } // namespace llvm
3457 
3458 namespace {
3459 
3460 template <typename BaseTy, typename ToTy>
3461 struct CachedReachabilityAA : public BaseTy {
3462   using RQITy = ReachabilityQueryInfo<ToTy>;
3463 
3464   CachedReachabilityAA(const IRPosition &IRP, Attributor &A) : BaseTy(IRP, A) {}
3465 
3466   /// See AbstractAttribute::isQueryAA.
3467   bool isQueryAA() const override { return true; }
3468 
3469   /// See AbstractAttribute::updateImpl(...).
3470   ChangeStatus updateImpl(Attributor &A) override {
3471     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3472     for (unsigned u = 0, e = QueryVector.size(); u < e; ++u) {
3473       RQITy *RQI = QueryVector[u];
3474       if (RQI->Result == RQITy::Reachable::No &&
3475           isReachableImpl(A, *RQI, /*IsTemporaryRQI=*/false))
3476         Changed = ChangeStatus::CHANGED;
3477     }
3478     return Changed;
3479   }
3480 
3481   virtual bool isReachableImpl(Attributor &A, RQITy &RQI,
3482                                bool IsTemporaryRQI) = 0;
3483 
3484   bool rememberResult(Attributor &A, typename RQITy::Reachable Result,
3485                       RQITy &RQI, bool UsedExclusionSet, bool IsTemporaryRQI) {
3486     RQI.Result = Result;
3487 
3488     // Remove the temporary RQI from the cache.
3489     if (IsTemporaryRQI)
3490       QueryCache.erase(&RQI);
3491 
3492     // Insert a plain RQI (w/o exclusion set) if that makes sense. Two options:
3493     // 1) If it is reachable, it doesn't matter if we have an exclusion set for
3494     // this query. 2) We did not use the exclusion set, potentially because
3495     // there is none.
3496     if (Result == RQITy::Reachable::Yes || !UsedExclusionSet) {
3497       RQITy PlainRQI(RQI.From, RQI.To);
3498       if (!QueryCache.count(&PlainRQI)) {
3499         RQITy *RQIPtr = new (A.Allocator) RQITy(RQI.From, RQI.To);
3500         RQIPtr->Result = Result;
3501         QueryVector.push_back(RQIPtr);
3502         QueryCache.insert(RQIPtr);
3503       }
3504     }
3505 
3506     // Check if we need to insert a new permanent RQI with the exclusion set.
3507     if (IsTemporaryRQI && Result != RQITy::Reachable::Yes && UsedExclusionSet) {
3508       assert((!RQI.ExclusionSet || !RQI.ExclusionSet->empty()) &&
3509              "Did not expect empty set!");
3510       RQITy *RQIPtr = new (A.Allocator)
3511           RQITy(A, *RQI.From, *RQI.To, RQI.ExclusionSet, true);
3512       assert(RQIPtr->Result == RQITy::Reachable::No && "Already reachable?");
3513       RQIPtr->Result = Result;
3514       assert(!QueryCache.count(RQIPtr));
3515       QueryVector.push_back(RQIPtr);
3516       QueryCache.insert(RQIPtr);
3517     }
3518 
3519     if (Result == RQITy::Reachable::No && IsTemporaryRQI)
3520       A.registerForUpdate(*this);
3521     return Result == RQITy::Reachable::Yes;
3522   }
3523 
3524   const std::string getAsStr(Attributor *A) const override {
3525     // TODO: Return the number of reachable queries.
3526     return "#queries(" + std::to_string(QueryVector.size()) + ")";
3527   }
3528 
3529   bool checkQueryCache(Attributor &A, RQITy &StackRQI,
3530                        typename RQITy::Reachable &Result) {
3531     if (!this->getState().isValidState()) {
3532       Result = RQITy::Reachable::Yes;
3533       return true;
3534     }
3535 
3536     // If we have an exclusion set we might be able to find our answer by
3537     // ignoring it first.
3538     if (StackRQI.ExclusionSet) {
3539       RQITy PlainRQI(StackRQI.From, StackRQI.To);
3540       auto It = QueryCache.find(&PlainRQI);
3541       if (It != QueryCache.end() && (*It)->Result == RQITy::Reachable::No) {
3542         Result = RQITy::Reachable::No;
3543         return true;
3544       }
3545     }
3546 
3547     auto It = QueryCache.find(&StackRQI);
3548     if (It != QueryCache.end()) {
3549       Result = (*It)->Result;
3550       return true;
3551     }
3552 
3553     // Insert a temporary for recursive queries. We will replace it with a
3554     // permanent entry later.
3555     QueryCache.insert(&StackRQI);
3556     return false;
3557   }
3558 
3559 private:
3560   SmallVector<RQITy *> QueryVector;
3561   DenseSet<RQITy *> QueryCache;
3562 };
3563 
3564 struct AAIntraFnReachabilityFunction final
3565     : public CachedReachabilityAA<AAIntraFnReachability, Instruction> {
3566   using Base = CachedReachabilityAA<AAIntraFnReachability, Instruction>;
3567   AAIntraFnReachabilityFunction(const IRPosition &IRP, Attributor &A)
3568       : Base(IRP, A) {
3569     DT = A.getInfoCache().getAnalysisResultForFunction<DominatorTreeAnalysis>(
3570         *IRP.getAssociatedFunction());
3571   }
3572 
3573   bool isAssumedReachable(
3574       Attributor &A, const Instruction &From, const Instruction &To,
3575       const AA::InstExclusionSetTy *ExclusionSet) const override {
3576     auto *NonConstThis = const_cast<AAIntraFnReachabilityFunction *>(this);
3577     if (&From == &To)
3578       return true;
3579 
3580     RQITy StackRQI(A, From, To, ExclusionSet, false);
3581     typename RQITy::Reachable Result;
3582     if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
3583       return NonConstThis->isReachableImpl(A, StackRQI,
3584                                            /*IsTemporaryRQI=*/true);
3585     return Result == RQITy::Reachable::Yes;
3586   }
3587 
3588   ChangeStatus updateImpl(Attributor &A) override {
3589     // We only depend on liveness. DeadEdges is all we care about, check if any
3590     // of them changed.
3591     auto *LivenessAA =
3592         A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL);
3593     if (LivenessAA &&
3594         llvm::all_of(DeadEdges,
3595                      [&](const auto &DeadEdge) {
3596                        return LivenessAA->isEdgeDead(DeadEdge.first,
3597                                                      DeadEdge.second);
3598                      }) &&
3599         llvm::all_of(DeadBlocks, [&](const BasicBlock *BB) {
3600           return LivenessAA->isAssumedDead(BB);
3601         })) {
3602       return ChangeStatus::UNCHANGED;
3603     }
3604     DeadEdges.clear();
3605     DeadBlocks.clear();
3606     return Base::updateImpl(A);
3607   }
3608 
3609   bool isReachableImpl(Attributor &A, RQITy &RQI,
3610                        bool IsTemporaryRQI) override {
3611     const Instruction *Origin = RQI.From;
3612     bool UsedExclusionSet = false;
3613 
3614     auto WillReachInBlock = [&](const Instruction &From, const Instruction &To,
3615                                 const AA::InstExclusionSetTy *ExclusionSet) {
3616       const Instruction *IP = &From;
3617       while (IP && IP != &To) {
3618         if (ExclusionSet && IP != Origin && ExclusionSet->count(IP)) {
3619           UsedExclusionSet = true;
3620           break;
3621         }
3622         IP = IP->getNextNode();
3623       }
3624       return IP == &To;
3625     };
3626 
3627     const BasicBlock *FromBB = RQI.From->getParent();
3628     const BasicBlock *ToBB = RQI.To->getParent();
3629     assert(FromBB->getParent() == ToBB->getParent() &&
3630            "Not an intra-procedural query!");
3631 
3632     // Check intra-block reachability, however, other reaching paths are still
3633     // possible.
3634     if (FromBB == ToBB &&
3635         WillReachInBlock(*RQI.From, *RQI.To, RQI.ExclusionSet))
3636       return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3637                             IsTemporaryRQI);
3638 
3639     // Check if reaching the ToBB block is sufficient or if even that would not
3640     // ensure reaching the target. In the latter case we are done.
3641     if (!WillReachInBlock(ToBB->front(), *RQI.To, RQI.ExclusionSet))
3642       return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3643                             IsTemporaryRQI);
3644 
3645     const Function *Fn = FromBB->getParent();
3646     SmallPtrSet<const BasicBlock *, 16> ExclusionBlocks;
3647     if (RQI.ExclusionSet)
3648       for (auto *I : *RQI.ExclusionSet)
3649         if (I->getFunction() == Fn)
3650           ExclusionBlocks.insert(I->getParent());
3651 
3652     // Check if we make it out of the FromBB block at all.
3653     if (ExclusionBlocks.count(FromBB) &&
3654         !WillReachInBlock(*RQI.From, *FromBB->getTerminator(),
3655                           RQI.ExclusionSet))
3656       return rememberResult(A, RQITy::Reachable::No, RQI, true, IsTemporaryRQI);
3657 
3658     auto *LivenessAA =
3659         A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL);
3660     if (LivenessAA && LivenessAA->isAssumedDead(ToBB)) {
3661       DeadBlocks.insert(ToBB);
3662       return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3663                             IsTemporaryRQI);
3664     }
3665 
3666     SmallPtrSet<const BasicBlock *, 16> Visited;
3667     SmallVector<const BasicBlock *, 16> Worklist;
3668     Worklist.push_back(FromBB);
3669 
3670     DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> LocalDeadEdges;
3671     while (!Worklist.empty()) {
3672       const BasicBlock *BB = Worklist.pop_back_val();
3673       if (!Visited.insert(BB).second)
3674         continue;
3675       for (const BasicBlock *SuccBB : successors(BB)) {
3676         if (LivenessAA && LivenessAA->isEdgeDead(BB, SuccBB)) {
3677           LocalDeadEdges.insert({BB, SuccBB});
3678           continue;
3679         }
3680         // We checked before if we just need to reach the ToBB block.
3681         if (SuccBB == ToBB)
3682           return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3683                                 IsTemporaryRQI);
3684         if (DT && ExclusionBlocks.empty() && DT->dominates(BB, ToBB))
3685           return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3686                                 IsTemporaryRQI);
3687 
3688         if (ExclusionBlocks.count(SuccBB)) {
3689           UsedExclusionSet = true;
3690           continue;
3691         }
3692         Worklist.push_back(SuccBB);
3693       }
3694     }
3695 
3696     DeadEdges.insert(LocalDeadEdges.begin(), LocalDeadEdges.end());
3697     return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3698                           IsTemporaryRQI);
3699   }
3700 
3701   /// See AbstractAttribute::trackStatistics()
3702   void trackStatistics() const override {}
3703 
3704 private:
3705   // Set of assumed dead blocks we used in the last query. If any changes we
3706   // update the state.
3707   DenseSet<const BasicBlock *> DeadBlocks;
3708 
3709   // Set of assumed dead edges we used in the last query. If any changes we
3710   // update the state.
3711   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> DeadEdges;
3712 
3713   /// The dominator tree of the function to short-circuit reasoning.
3714   const DominatorTree *DT = nullptr;
3715 };
3716 } // namespace
3717 
3718 /// ------------------------ NoAlias Argument Attribute ------------------------
3719 
3720 bool AANoAlias::isImpliedByIR(Attributor &A, const IRPosition &IRP,
3721                               Attribute::AttrKind ImpliedAttributeKind,
3722                               bool IgnoreSubsumingPositions) {
3723   assert(ImpliedAttributeKind == Attribute::NoAlias &&
3724          "Unexpected attribute kind");
3725   Value *Val = &IRP.getAssociatedValue();
3726   if (IRP.getPositionKind() != IRP_CALL_SITE_ARGUMENT) {
3727     if (isa<AllocaInst>(Val))
3728       return true;
3729   } else {
3730     IgnoreSubsumingPositions = true;
3731   }
3732 
3733   if (isa<UndefValue>(Val))
3734     return true;
3735 
3736   if (isa<ConstantPointerNull>(Val) &&
3737       !NullPointerIsDefined(IRP.getAnchorScope(),
3738                             Val->getType()->getPointerAddressSpace()))
3739     return true;
3740 
3741   if (A.hasAttr(IRP, {Attribute::ByVal, Attribute::NoAlias},
3742                 IgnoreSubsumingPositions, Attribute::NoAlias))
3743     return true;
3744 
3745   return false;
3746 }
3747 
3748 namespace {
3749 struct AANoAliasImpl : AANoAlias {
3750   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3751     assert(getAssociatedType()->isPointerTy() &&
3752            "Noalias is a pointer attribute");
3753   }
3754 
3755   const std::string getAsStr(Attributor *A) const override {
3756     return getAssumed() ? "noalias" : "may-alias";
3757   }
3758 };
3759 
3760 /// NoAlias attribute for a floating value.
3761 struct AANoAliasFloating final : AANoAliasImpl {
3762   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3763       : AANoAliasImpl(IRP, A) {}
3764 
3765   /// See AbstractAttribute::updateImpl(...).
3766   ChangeStatus updateImpl(Attributor &A) override {
3767     // TODO: Implement this.
3768     return indicatePessimisticFixpoint();
3769   }
3770 
3771   /// See AbstractAttribute::trackStatistics()
3772   void trackStatistics() const override {
3773     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3774   }
3775 };
3776 
3777 /// NoAlias attribute for an argument.
3778 struct AANoAliasArgument final
3779     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3780   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3781   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3782 
3783   /// See AbstractAttribute::update(...).
3784   ChangeStatus updateImpl(Attributor &A) override {
3785     // We have to make sure no-alias on the argument does not break
3786     // synchronization when this is a callback argument, see also [1] below.
3787     // If synchronization cannot be affected, we delegate to the base updateImpl
3788     // function, otherwise we give up for now.
3789 
3790     // If the function is no-sync, no-alias cannot break synchronization.
3791     bool IsKnownNoSycn;
3792     if (AA::hasAssumedIRAttr<Attribute::NoSync>(
3793             A, this, IRPosition::function_scope(getIRPosition()),
3794             DepClassTy::OPTIONAL, IsKnownNoSycn))
3795       return Base::updateImpl(A);
3796 
3797     // If the argument is read-only, no-alias cannot break synchronization.
3798     bool IsKnown;
3799     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3800       return Base::updateImpl(A);
3801 
3802     // If the argument is never passed through callbacks, no-alias cannot break
3803     // synchronization.
3804     bool UsedAssumedInformation = false;
3805     if (A.checkForAllCallSites(
3806             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3807             true, UsedAssumedInformation))
3808       return Base::updateImpl(A);
3809 
3810     // TODO: add no-alias but make sure it doesn't break synchronization by
3811     // introducing fake uses. See:
3812     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3813     //     International Workshop on OpenMP 2018,
3814     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3815 
3816     return indicatePessimisticFixpoint();
3817   }
3818 
3819   /// See AbstractAttribute::trackStatistics()
3820   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3821 };
3822 
3823 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3824   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3825       : AANoAliasImpl(IRP, A) {}
3826 
3827   /// Determine if the underlying value may alias with the call site argument
3828   /// \p OtherArgNo of \p ICS (= the underlying call site).
3829   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3830                             const AAMemoryBehavior &MemBehaviorAA,
3831                             const CallBase &CB, unsigned OtherArgNo) {
3832     // We do not need to worry about aliasing with the underlying IRP.
3833     if (this->getCalleeArgNo() == (int)OtherArgNo)
3834       return false;
3835 
3836     // If it is not a pointer or pointer vector we do not alias.
3837     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3838     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3839       return false;
3840 
3841     auto *CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3842         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3843 
3844     // If the argument is readnone, there is no read-write aliasing.
3845     if (CBArgMemBehaviorAA && CBArgMemBehaviorAA->isAssumedReadNone()) {
3846       A.recordDependence(*CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3847       return false;
3848     }
3849 
3850     // If the argument is readonly and the underlying value is readonly, there
3851     // is no read-write aliasing.
3852     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3853     if (CBArgMemBehaviorAA && CBArgMemBehaviorAA->isAssumedReadOnly() &&
3854         IsReadOnly) {
3855       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3856       A.recordDependence(*CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3857       return false;
3858     }
3859 
3860     // We have to utilize actual alias analysis queries so we need the object.
3861     if (!AAR)
3862       AAR = A.getInfoCache().getAnalysisResultForFunction<AAManager>(
3863           *getAnchorScope());
3864 
3865     // Try to rule it out at the call site.
3866     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3867     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3868                          "callsite arguments: "
3869                       << getAssociatedValue() << " " << *ArgOp << " => "
3870                       << (IsAliasing ? "" : "no-") << "alias \n");
3871 
3872     return IsAliasing;
3873   }
3874 
3875   bool isKnownNoAliasDueToNoAliasPreservation(
3876       Attributor &A, AAResults *&AAR, const AAMemoryBehavior &MemBehaviorAA) {
3877     // We can deduce "noalias" if the following conditions hold.
3878     // (i)   Associated value is assumed to be noalias in the definition.
3879     // (ii)  Associated value is assumed to be no-capture in all the uses
3880     //       possibly executed before this callsite.
3881     // (iii) There is no other pointer argument which could alias with the
3882     //       value.
3883 
3884     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3885       const auto *DerefAA = A.getAAFor<AADereferenceable>(
3886           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3887       return DerefAA ? DerefAA->getAssumedDereferenceableBytes() : 0;
3888     };
3889 
3890     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3891     const Function *ScopeFn = VIRP.getAnchorScope();
3892     // Check whether the value is captured in the scope using AANoCapture.
3893     // Look at CFG and check only uses possibly executed before this
3894     // callsite.
3895     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3896       Instruction *UserI = cast<Instruction>(U.getUser());
3897 
3898       // If UserI is the curr instruction and there is a single potential use of
3899       // the value in UserI we allow the use.
3900       // TODO: We should inspect the operands and allow those that cannot alias
3901       //       with the value.
3902       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3903         return true;
3904 
3905       if (ScopeFn) {
3906         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3907           if (CB->isArgOperand(&U)) {
3908 
3909             unsigned ArgNo = CB->getArgOperandNo(&U);
3910 
3911             bool IsKnownNoCapture;
3912             if (AA::hasAssumedIRAttr<Attribute::NoCapture>(
3913                     A, this, IRPosition::callsite_argument(*CB, ArgNo),
3914                     DepClassTy::OPTIONAL, IsKnownNoCapture))
3915               return true;
3916           }
3917         }
3918 
3919         if (!AA::isPotentiallyReachable(
3920                 A, *UserI, *getCtxI(), *this, /* ExclusionSet */ nullptr,
3921                 [ScopeFn](const Function &Fn) { return &Fn != ScopeFn; }))
3922           return true;
3923       }
3924 
3925       // TODO: We should track the capturing uses in AANoCapture but the problem
3926       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3927       //       a value in the module slice.
3928       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3929       case UseCaptureKind::NO_CAPTURE:
3930         return true;
3931       case UseCaptureKind::MAY_CAPTURE:
3932         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3933                           << "\n");
3934         return false;
3935       case UseCaptureKind::PASSTHROUGH:
3936         Follow = true;
3937         return true;
3938       }
3939       llvm_unreachable("unknown UseCaptureKind");
3940     };
3941 
3942     bool IsKnownNoCapture;
3943     const AANoCapture *NoCaptureAA = nullptr;
3944     bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
3945         A, this, VIRP, DepClassTy::NONE, IsKnownNoCapture, false, &NoCaptureAA);
3946     if (!IsAssumedNoCapture &&
3947         (!NoCaptureAA || !NoCaptureAA->isAssumedNoCaptureMaybeReturned())) {
3948       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3949         LLVM_DEBUG(
3950             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3951                    << " cannot be noalias as it is potentially captured\n");
3952         return false;
3953       }
3954     }
3955     if (NoCaptureAA)
3956       A.recordDependence(*NoCaptureAA, *this, DepClassTy::OPTIONAL);
3957 
3958     // Check there is no other pointer argument which could alias with the
3959     // value passed at this call site.
3960     // TODO: AbstractCallSite
3961     const auto &CB = cast<CallBase>(getAnchorValue());
3962     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3963       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3964         return false;
3965 
3966     return true;
3967   }
3968 
3969   /// See AbstractAttribute::updateImpl(...).
3970   ChangeStatus updateImpl(Attributor &A) override {
3971     // If the argument is readnone we are done as there are no accesses via the
3972     // argument.
3973     auto *MemBehaviorAA =
3974         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3975     if (MemBehaviorAA && MemBehaviorAA->isAssumedReadNone()) {
3976       A.recordDependence(*MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3977       return ChangeStatus::UNCHANGED;
3978     }
3979 
3980     bool IsKnownNoAlias;
3981     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3982     if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
3983             A, this, VIRP, DepClassTy::REQUIRED, IsKnownNoAlias)) {
3984       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3985                         << " is not no-alias at the definition\n");
3986       return indicatePessimisticFixpoint();
3987     }
3988 
3989     AAResults *AAR = nullptr;
3990     if (MemBehaviorAA &&
3991         isKnownNoAliasDueToNoAliasPreservation(A, AAR, *MemBehaviorAA)) {
3992       LLVM_DEBUG(
3993           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3994       return ChangeStatus::UNCHANGED;
3995     }
3996 
3997     return indicatePessimisticFixpoint();
3998   }
3999 
4000   /// See AbstractAttribute::trackStatistics()
4001   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
4002 };
4003 
4004 /// NoAlias attribute for function return value.
4005 struct AANoAliasReturned final : AANoAliasImpl {
4006   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
4007       : AANoAliasImpl(IRP, A) {}
4008 
4009   /// See AbstractAttribute::updateImpl(...).
4010   ChangeStatus updateImpl(Attributor &A) override {
4011 
4012     auto CheckReturnValue = [&](Value &RV) -> bool {
4013       if (Constant *C = dyn_cast<Constant>(&RV))
4014         if (C->isNullValue() || isa<UndefValue>(C))
4015           return true;
4016 
4017       /// For now, we can only deduce noalias if we have call sites.
4018       /// FIXME: add more support.
4019       if (!isa<CallBase>(&RV))
4020         return false;
4021 
4022       const IRPosition &RVPos = IRPosition::value(RV);
4023       bool IsKnownNoAlias;
4024       if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
4025               A, this, RVPos, DepClassTy::REQUIRED, IsKnownNoAlias))
4026         return false;
4027 
4028       bool IsKnownNoCapture;
4029       const AANoCapture *NoCaptureAA = nullptr;
4030       bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
4031           A, this, RVPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
4032           &NoCaptureAA);
4033       return IsAssumedNoCapture ||
4034              (NoCaptureAA && NoCaptureAA->isAssumedNoCaptureMaybeReturned());
4035     };
4036 
4037     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
4038       return indicatePessimisticFixpoint();
4039 
4040     return ChangeStatus::UNCHANGED;
4041   }
4042 
4043   /// See AbstractAttribute::trackStatistics()
4044   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
4045 };
4046 
4047 /// NoAlias attribute deduction for a call site return value.
4048 struct AANoAliasCallSiteReturned final
4049     : AACalleeToCallSite<AANoAlias, AANoAliasImpl> {
4050   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
4051       : AACalleeToCallSite<AANoAlias, AANoAliasImpl>(IRP, A) {}
4052 
4053   /// See AbstractAttribute::trackStatistics()
4054   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
4055 };
4056 } // namespace
4057 
4058 /// -------------------AAIsDead Function Attribute-----------------------
4059 
4060 namespace {
4061 struct AAIsDeadValueImpl : public AAIsDead {
4062   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
4063 
4064   /// See AAIsDead::isAssumedDead().
4065   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
4066 
4067   /// See AAIsDead::isKnownDead().
4068   bool isKnownDead() const override { return isKnown(IS_DEAD); }
4069 
4070   /// See AAIsDead::isAssumedDead(BasicBlock *).
4071   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
4072 
4073   /// See AAIsDead::isKnownDead(BasicBlock *).
4074   bool isKnownDead(const BasicBlock *BB) const override { return false; }
4075 
4076   /// See AAIsDead::isAssumedDead(Instruction *I).
4077   bool isAssumedDead(const Instruction *I) const override {
4078     return I == getCtxI() && isAssumedDead();
4079   }
4080 
4081   /// See AAIsDead::isKnownDead(Instruction *I).
4082   bool isKnownDead(const Instruction *I) const override {
4083     return isAssumedDead(I) && isKnownDead();
4084   }
4085 
4086   /// See AbstractAttribute::getAsStr().
4087   const std::string getAsStr(Attributor *A) const override {
4088     return isAssumedDead() ? "assumed-dead" : "assumed-live";
4089   }
4090 
4091   /// Check if all uses are assumed dead.
4092   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
4093     // Callers might not check the type, void has no uses.
4094     if (V.getType()->isVoidTy() || V.use_empty())
4095       return true;
4096 
4097     // If we replace a value with a constant there are no uses left afterwards.
4098     if (!isa<Constant>(V)) {
4099       if (auto *I = dyn_cast<Instruction>(&V))
4100         if (!A.isRunOn(*I->getFunction()))
4101           return false;
4102       bool UsedAssumedInformation = false;
4103       std::optional<Constant *> C =
4104           A.getAssumedConstant(V, *this, UsedAssumedInformation);
4105       if (!C || *C)
4106         return true;
4107     }
4108 
4109     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
4110     // Explicitly set the dependence class to required because we want a long
4111     // chain of N dependent instructions to be considered live as soon as one is
4112     // without going through N update cycles. This is not required for
4113     // correctness.
4114     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
4115                              DepClassTy::REQUIRED,
4116                              /* IgnoreDroppableUses */ false);
4117   }
4118 
4119   /// Determine if \p I is assumed to be side-effect free.
4120   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
4121     if (!I || wouldInstructionBeTriviallyDead(I))
4122       return true;
4123 
4124     auto *CB = dyn_cast<CallBase>(I);
4125     if (!CB || isa<IntrinsicInst>(CB))
4126       return false;
4127 
4128     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
4129 
4130     bool IsKnownNoUnwind;
4131     if (!AA::hasAssumedIRAttr<Attribute::NoUnwind>(
4132             A, this, CallIRP, DepClassTy::OPTIONAL, IsKnownNoUnwind))
4133       return false;
4134 
4135     bool IsKnown;
4136     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
4137   }
4138 };
4139 
4140 struct AAIsDeadFloating : public AAIsDeadValueImpl {
4141   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
4142       : AAIsDeadValueImpl(IRP, A) {}
4143 
4144   /// See AbstractAttribute::initialize(...).
4145   void initialize(Attributor &A) override {
4146     AAIsDeadValueImpl::initialize(A);
4147 
4148     if (isa<UndefValue>(getAssociatedValue())) {
4149       indicatePessimisticFixpoint();
4150       return;
4151     }
4152 
4153     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4154     if (!isAssumedSideEffectFree(A, I)) {
4155       if (!isa_and_nonnull<StoreInst>(I) && !isa_and_nonnull<FenceInst>(I))
4156         indicatePessimisticFixpoint();
4157       else
4158         removeAssumedBits(HAS_NO_EFFECT);
4159     }
4160   }
4161 
4162   bool isDeadFence(Attributor &A, FenceInst &FI) {
4163     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
4164         IRPosition::function(*FI.getFunction()), *this, DepClassTy::NONE);
4165     if (!ExecDomainAA || !ExecDomainAA->isNoOpFence(FI))
4166       return false;
4167     A.recordDependence(*ExecDomainAA, *this, DepClassTy::OPTIONAL);
4168     return true;
4169   }
4170 
4171   bool isDeadStore(Attributor &A, StoreInst &SI,
4172                    SmallSetVector<Instruction *, 8> *AssumeOnlyInst = nullptr) {
4173     // Lang ref now states volatile store is not UB/dead, let's skip them.
4174     if (SI.isVolatile())
4175       return false;
4176 
4177     // If we are collecting assumes to be deleted we are in the manifest stage.
4178     // It's problematic to collect the potential copies again now so we use the
4179     // cached ones.
4180     bool UsedAssumedInformation = false;
4181     if (!AssumeOnlyInst) {
4182       PotentialCopies.clear();
4183       if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
4184                                                UsedAssumedInformation)) {
4185         LLVM_DEBUG(
4186             dbgs()
4187             << "[AAIsDead] Could not determine potential copies of store!\n");
4188         return false;
4189       }
4190     }
4191     LLVM_DEBUG(dbgs() << "[AAIsDead] Store has " << PotentialCopies.size()
4192                       << " potential copies.\n");
4193 
4194     InformationCache &InfoCache = A.getInfoCache();
4195     return llvm::all_of(PotentialCopies, [&](Value *V) {
4196       if (A.isAssumedDead(IRPosition::value(*V), this, nullptr,
4197                           UsedAssumedInformation))
4198         return true;
4199       if (auto *LI = dyn_cast<LoadInst>(V)) {
4200         if (llvm::all_of(LI->uses(), [&](const Use &U) {
4201               auto &UserI = cast<Instruction>(*U.getUser());
4202               if (InfoCache.isOnlyUsedByAssume(UserI)) {
4203                 if (AssumeOnlyInst)
4204                   AssumeOnlyInst->insert(&UserI);
4205                 return true;
4206               }
4207               return A.isAssumedDead(U, this, nullptr, UsedAssumedInformation);
4208             })) {
4209           return true;
4210         }
4211       }
4212       LLVM_DEBUG(dbgs() << "[AAIsDead] Potential copy " << *V
4213                         << " is assumed live!\n");
4214       return false;
4215     });
4216   }
4217 
4218   /// See AbstractAttribute::getAsStr().
4219   const std::string getAsStr(Attributor *A) const override {
4220     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4221     if (isa_and_nonnull<StoreInst>(I))
4222       if (isValidState())
4223         return "assumed-dead-store";
4224     if (isa_and_nonnull<FenceInst>(I))
4225       if (isValidState())
4226         return "assumed-dead-fence";
4227     return AAIsDeadValueImpl::getAsStr(A);
4228   }
4229 
4230   /// See AbstractAttribute::updateImpl(...).
4231   ChangeStatus updateImpl(Attributor &A) override {
4232     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4233     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
4234       if (!isDeadStore(A, *SI))
4235         return indicatePessimisticFixpoint();
4236     } else if (auto *FI = dyn_cast_or_null<FenceInst>(I)) {
4237       if (!isDeadFence(A, *FI))
4238         return indicatePessimisticFixpoint();
4239     } else {
4240       if (!isAssumedSideEffectFree(A, I))
4241         return indicatePessimisticFixpoint();
4242       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
4243         return indicatePessimisticFixpoint();
4244     }
4245     return ChangeStatus::UNCHANGED;
4246   }
4247 
4248   bool isRemovableStore() const override {
4249     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
4250   }
4251 
4252   /// See AbstractAttribute::manifest(...).
4253   ChangeStatus manifest(Attributor &A) override {
4254     Value &V = getAssociatedValue();
4255     if (auto *I = dyn_cast<Instruction>(&V)) {
4256       // If we get here we basically know the users are all dead. We check if
4257       // isAssumedSideEffectFree returns true here again because it might not be
4258       // the case and only the users are dead but the instruction (=call) is
4259       // still needed.
4260       if (auto *SI = dyn_cast<StoreInst>(I)) {
4261         SmallSetVector<Instruction *, 8> AssumeOnlyInst;
4262         bool IsDead = isDeadStore(A, *SI, &AssumeOnlyInst);
4263         (void)IsDead;
4264         assert(IsDead && "Store was assumed to be dead!");
4265         A.deleteAfterManifest(*I);
4266         for (size_t i = 0; i < AssumeOnlyInst.size(); ++i) {
4267           Instruction *AOI = AssumeOnlyInst[i];
4268           for (auto *Usr : AOI->users())
4269             AssumeOnlyInst.insert(cast<Instruction>(Usr));
4270           A.deleteAfterManifest(*AOI);
4271         }
4272         return ChangeStatus::CHANGED;
4273       }
4274       if (auto *FI = dyn_cast<FenceInst>(I)) {
4275         assert(isDeadFence(A, *FI));
4276         A.deleteAfterManifest(*FI);
4277         return ChangeStatus::CHANGED;
4278       }
4279       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
4280         A.deleteAfterManifest(*I);
4281         return ChangeStatus::CHANGED;
4282       }
4283     }
4284     return ChangeStatus::UNCHANGED;
4285   }
4286 
4287   /// See AbstractAttribute::trackStatistics()
4288   void trackStatistics() const override {
4289     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
4290   }
4291 
4292 private:
4293   // The potential copies of a dead store, used for deletion during manifest.
4294   SmallSetVector<Value *, 4> PotentialCopies;
4295 };
4296 
4297 struct AAIsDeadArgument : public AAIsDeadFloating {
4298   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
4299       : AAIsDeadFloating(IRP, A) {}
4300 
4301   /// See AbstractAttribute::manifest(...).
4302   ChangeStatus manifest(Attributor &A) override {
4303     Argument &Arg = *getAssociatedArgument();
4304     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
4305       if (A.registerFunctionSignatureRewrite(
4306               Arg, /* ReplacementTypes */ {},
4307               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
4308               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
4309         return ChangeStatus::CHANGED;
4310       }
4311     return ChangeStatus::UNCHANGED;
4312   }
4313 
4314   /// See AbstractAttribute::trackStatistics()
4315   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
4316 };
4317 
4318 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
4319   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
4320       : AAIsDeadValueImpl(IRP, A) {}
4321 
4322   /// See AbstractAttribute::initialize(...).
4323   void initialize(Attributor &A) override {
4324     AAIsDeadValueImpl::initialize(A);
4325     if (isa<UndefValue>(getAssociatedValue()))
4326       indicatePessimisticFixpoint();
4327   }
4328 
4329   /// See AbstractAttribute::updateImpl(...).
4330   ChangeStatus updateImpl(Attributor &A) override {
4331     // TODO: Once we have call site specific value information we can provide
4332     //       call site specific liveness information and then it makes
4333     //       sense to specialize attributes for call sites arguments instead of
4334     //       redirecting requests to the callee argument.
4335     Argument *Arg = getAssociatedArgument();
4336     if (!Arg)
4337       return indicatePessimisticFixpoint();
4338     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4339     auto *ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
4340     if (!ArgAA)
4341       return indicatePessimisticFixpoint();
4342     return clampStateAndIndicateChange(getState(), ArgAA->getState());
4343   }
4344 
4345   /// See AbstractAttribute::manifest(...).
4346   ChangeStatus manifest(Attributor &A) override {
4347     CallBase &CB = cast<CallBase>(getAnchorValue());
4348     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
4349     assert(!isa<UndefValue>(U.get()) &&
4350            "Expected undef values to be filtered out!");
4351     UndefValue &UV = *UndefValue::get(U->getType());
4352     if (A.changeUseAfterManifest(U, UV))
4353       return ChangeStatus::CHANGED;
4354     return ChangeStatus::UNCHANGED;
4355   }
4356 
4357   /// See AbstractAttribute::trackStatistics()
4358   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
4359 };
4360 
4361 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
4362   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
4363       : AAIsDeadFloating(IRP, A) {}
4364 
4365   /// See AAIsDead::isAssumedDead().
4366   bool isAssumedDead() const override {
4367     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
4368   }
4369 
4370   /// See AbstractAttribute::initialize(...).
4371   void initialize(Attributor &A) override {
4372     AAIsDeadFloating::initialize(A);
4373     if (isa<UndefValue>(getAssociatedValue())) {
4374       indicatePessimisticFixpoint();
4375       return;
4376     }
4377 
4378     // We track this separately as a secondary state.
4379     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
4380   }
4381 
4382   /// See AbstractAttribute::updateImpl(...).
4383   ChangeStatus updateImpl(Attributor &A) override {
4384     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4385     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
4386       IsAssumedSideEffectFree = false;
4387       Changed = ChangeStatus::CHANGED;
4388     }
4389     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
4390       return indicatePessimisticFixpoint();
4391     return Changed;
4392   }
4393 
4394   /// See AbstractAttribute::trackStatistics()
4395   void trackStatistics() const override {
4396     if (IsAssumedSideEffectFree)
4397       STATS_DECLTRACK_CSRET_ATTR(IsDead)
4398     else
4399       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
4400   }
4401 
4402   /// See AbstractAttribute::getAsStr().
4403   const std::string getAsStr(Attributor *A) const override {
4404     return isAssumedDead()
4405                ? "assumed-dead"
4406                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
4407   }
4408 
4409 private:
4410   bool IsAssumedSideEffectFree = true;
4411 };
4412 
4413 struct AAIsDeadReturned : public AAIsDeadValueImpl {
4414   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
4415       : AAIsDeadValueImpl(IRP, A) {}
4416 
4417   /// See AbstractAttribute::updateImpl(...).
4418   ChangeStatus updateImpl(Attributor &A) override {
4419 
4420     bool UsedAssumedInformation = false;
4421     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
4422                               {Instruction::Ret}, UsedAssumedInformation);
4423 
4424     auto PredForCallSite = [&](AbstractCallSite ACS) {
4425       if (ACS.isCallbackCall() || !ACS.getInstruction())
4426         return false;
4427       return areAllUsesAssumedDead(A, *ACS.getInstruction());
4428     };
4429 
4430     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4431                                 UsedAssumedInformation))
4432       return indicatePessimisticFixpoint();
4433 
4434     return ChangeStatus::UNCHANGED;
4435   }
4436 
4437   /// See AbstractAttribute::manifest(...).
4438   ChangeStatus manifest(Attributor &A) override {
4439     // TODO: Rewrite the signature to return void?
4440     bool AnyChange = false;
4441     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
4442     auto RetInstPred = [&](Instruction &I) {
4443       ReturnInst &RI = cast<ReturnInst>(I);
4444       if (!isa<UndefValue>(RI.getReturnValue()))
4445         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
4446       return true;
4447     };
4448     bool UsedAssumedInformation = false;
4449     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
4450                               UsedAssumedInformation);
4451     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
4452   }
4453 
4454   /// See AbstractAttribute::trackStatistics()
4455   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
4456 };
4457 
4458 struct AAIsDeadFunction : public AAIsDead {
4459   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
4460 
4461   /// See AbstractAttribute::initialize(...).
4462   void initialize(Attributor &A) override {
4463     Function *F = getAnchorScope();
4464     assert(F && "Did expect an anchor function");
4465     if (!isAssumedDeadInternalFunction(A)) {
4466       ToBeExploredFrom.insert(&F->getEntryBlock().front());
4467       assumeLive(A, F->getEntryBlock());
4468     }
4469   }
4470 
4471   bool isAssumedDeadInternalFunction(Attributor &A) {
4472     if (!getAnchorScope()->hasLocalLinkage())
4473       return false;
4474     bool UsedAssumedInformation = false;
4475     return A.checkForAllCallSites([](AbstractCallSite) { return false; }, *this,
4476                                   true, UsedAssumedInformation);
4477   }
4478 
4479   /// See AbstractAttribute::getAsStr().
4480   const std::string getAsStr(Attributor *A) const override {
4481     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
4482            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
4483            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
4484            std::to_string(KnownDeadEnds.size()) + "]";
4485   }
4486 
4487   /// See AbstractAttribute::manifest(...).
4488   ChangeStatus manifest(Attributor &A) override {
4489     assert(getState().isValidState() &&
4490            "Attempted to manifest an invalid state!");
4491 
4492     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4493     Function &F = *getAnchorScope();
4494 
4495     if (AssumedLiveBlocks.empty()) {
4496       A.deleteAfterManifest(F);
4497       return ChangeStatus::CHANGED;
4498     }
4499 
4500     // Flag to determine if we can change an invoke to a call assuming the
4501     // callee is nounwind. This is not possible if the personality of the
4502     // function allows to catch asynchronous exceptions.
4503     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
4504 
4505     KnownDeadEnds.set_union(ToBeExploredFrom);
4506     for (const Instruction *DeadEndI : KnownDeadEnds) {
4507       auto *CB = dyn_cast<CallBase>(DeadEndI);
4508       if (!CB)
4509         continue;
4510       bool IsKnownNoReturn;
4511       bool MayReturn = !AA::hasAssumedIRAttr<Attribute::NoReturn>(
4512           A, this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL,
4513           IsKnownNoReturn);
4514       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
4515         continue;
4516 
4517       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
4518         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
4519       else
4520         A.changeToUnreachableAfterManifest(
4521             const_cast<Instruction *>(DeadEndI->getNextNode()));
4522       HasChanged = ChangeStatus::CHANGED;
4523     }
4524 
4525     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
4526     for (BasicBlock &BB : F)
4527       if (!AssumedLiveBlocks.count(&BB)) {
4528         A.deleteAfterManifest(BB);
4529         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
4530         HasChanged = ChangeStatus::CHANGED;
4531       }
4532 
4533     return HasChanged;
4534   }
4535 
4536   /// See AbstractAttribute::updateImpl(...).
4537   ChangeStatus updateImpl(Attributor &A) override;
4538 
4539   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
4540     assert(From->getParent() == getAnchorScope() &&
4541            To->getParent() == getAnchorScope() &&
4542            "Used AAIsDead of the wrong function");
4543     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
4544   }
4545 
4546   /// See AbstractAttribute::trackStatistics()
4547   void trackStatistics() const override {}
4548 
4549   /// Returns true if the function is assumed dead.
4550   bool isAssumedDead() const override { return false; }
4551 
4552   /// See AAIsDead::isKnownDead().
4553   bool isKnownDead() const override { return false; }
4554 
4555   /// See AAIsDead::isAssumedDead(BasicBlock *).
4556   bool isAssumedDead(const BasicBlock *BB) const override {
4557     assert(BB->getParent() == getAnchorScope() &&
4558            "BB must be in the same anchor scope function.");
4559 
4560     if (!getAssumed())
4561       return false;
4562     return !AssumedLiveBlocks.count(BB);
4563   }
4564 
4565   /// See AAIsDead::isKnownDead(BasicBlock *).
4566   bool isKnownDead(const BasicBlock *BB) const override {
4567     return getKnown() && isAssumedDead(BB);
4568   }
4569 
4570   /// See AAIsDead::isAssumed(Instruction *I).
4571   bool isAssumedDead(const Instruction *I) const override {
4572     assert(I->getParent()->getParent() == getAnchorScope() &&
4573            "Instruction must be in the same anchor scope function.");
4574 
4575     if (!getAssumed())
4576       return false;
4577 
4578     // If it is not in AssumedLiveBlocks then it for sure dead.
4579     // Otherwise, it can still be after noreturn call in a live block.
4580     if (!AssumedLiveBlocks.count(I->getParent()))
4581       return true;
4582 
4583     // If it is not after a liveness barrier it is live.
4584     const Instruction *PrevI = I->getPrevNode();
4585     while (PrevI) {
4586       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
4587         return true;
4588       PrevI = PrevI->getPrevNode();
4589     }
4590     return false;
4591   }
4592 
4593   /// See AAIsDead::isKnownDead(Instruction *I).
4594   bool isKnownDead(const Instruction *I) const override {
4595     return getKnown() && isAssumedDead(I);
4596   }
4597 
4598   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
4599   /// that internal function called from \p BB should now be looked at.
4600   bool assumeLive(Attributor &A, const BasicBlock &BB) {
4601     if (!AssumedLiveBlocks.insert(&BB).second)
4602       return false;
4603 
4604     // We assume that all of BB is (probably) live now and if there are calls to
4605     // internal functions we will assume that those are now live as well. This
4606     // is a performance optimization for blocks with calls to a lot of internal
4607     // functions. It can however cause dead functions to be treated as live.
4608     for (const Instruction &I : BB)
4609       if (const auto *CB = dyn_cast<CallBase>(&I))
4610         if (auto *F = dyn_cast_if_present<Function>(CB->getCalledOperand()))
4611           if (F->hasLocalLinkage())
4612             A.markLiveInternalFunction(*F);
4613     return true;
4614   }
4615 
4616   /// Collection of instructions that need to be explored again, e.g., we
4617   /// did assume they do not transfer control to (one of their) successors.
4618   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
4619 
4620   /// Collection of instructions that are known to not transfer control.
4621   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
4622 
4623   /// Collection of all assumed live edges
4624   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
4625 
4626   /// Collection of all assumed live BasicBlocks.
4627   DenseSet<const BasicBlock *> AssumedLiveBlocks;
4628 };
4629 
4630 static bool
4631 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
4632                         AbstractAttribute &AA,
4633                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4634   const IRPosition &IPos = IRPosition::callsite_function(CB);
4635 
4636   bool IsKnownNoReturn;
4637   if (AA::hasAssumedIRAttr<Attribute::NoReturn>(
4638           A, &AA, IPos, DepClassTy::OPTIONAL, IsKnownNoReturn))
4639     return !IsKnownNoReturn;
4640   if (CB.isTerminator())
4641     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4642   else
4643     AliveSuccessors.push_back(CB.getNextNode());
4644   return false;
4645 }
4646 
4647 static bool
4648 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4649                         AbstractAttribute &AA,
4650                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4651   bool UsedAssumedInformation =
4652       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4653 
4654   // First, determine if we can change an invoke to a call assuming the
4655   // callee is nounwind. This is not possible if the personality of the
4656   // function allows to catch asynchronous exceptions.
4657   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4658     AliveSuccessors.push_back(&II.getUnwindDest()->front());
4659   } else {
4660     const IRPosition &IPos = IRPosition::callsite_function(II);
4661 
4662     bool IsKnownNoUnwind;
4663     if (AA::hasAssumedIRAttr<Attribute::NoUnwind>(
4664             A, &AA, IPos, DepClassTy::OPTIONAL, IsKnownNoUnwind)) {
4665       UsedAssumedInformation |= !IsKnownNoUnwind;
4666     } else {
4667       AliveSuccessors.push_back(&II.getUnwindDest()->front());
4668     }
4669   }
4670   return UsedAssumedInformation;
4671 }
4672 
4673 static bool
4674 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4675                         AbstractAttribute &AA,
4676                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4677   bool UsedAssumedInformation = false;
4678   if (BI.getNumSuccessors() == 1) {
4679     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4680   } else {
4681     std::optional<Constant *> C =
4682         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4683     if (!C || isa_and_nonnull<UndefValue>(*C)) {
4684       // No value yet, assume both edges are dead.
4685     } else if (isa_and_nonnull<ConstantInt>(*C)) {
4686       const BasicBlock *SuccBB =
4687           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4688       AliveSuccessors.push_back(&SuccBB->front());
4689     } else {
4690       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4691       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4692       UsedAssumedInformation = false;
4693     }
4694   }
4695   return UsedAssumedInformation;
4696 }
4697 
4698 static bool
4699 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4700                         AbstractAttribute &AA,
4701                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4702   bool UsedAssumedInformation = false;
4703   SmallVector<AA::ValueAndContext> Values;
4704   if (!A.getAssumedSimplifiedValues(IRPosition::value(*SI.getCondition()), &AA,
4705                                     Values, AA::AnyScope,
4706                                     UsedAssumedInformation)) {
4707     // Something went wrong, assume all successors are live.
4708     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4709       AliveSuccessors.push_back(&SuccBB->front());
4710     return false;
4711   }
4712 
4713   if (Values.empty() ||
4714       (Values.size() == 1 &&
4715        isa_and_nonnull<UndefValue>(Values.front().getValue()))) {
4716     // No valid value yet, assume all edges are dead.
4717     return UsedAssumedInformation;
4718   }
4719 
4720   Type &Ty = *SI.getCondition()->getType();
4721   SmallPtrSet<ConstantInt *, 8> Constants;
4722   auto CheckForConstantInt = [&](Value *V) {
4723     if (auto *CI = dyn_cast_if_present<ConstantInt>(AA::getWithType(*V, Ty))) {
4724       Constants.insert(CI);
4725       return true;
4726     }
4727     return false;
4728   };
4729 
4730   if (!all_of(Values, [&](AA::ValueAndContext &VAC) {
4731         return CheckForConstantInt(VAC.getValue());
4732       })) {
4733     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4734       AliveSuccessors.push_back(&SuccBB->front());
4735     return UsedAssumedInformation;
4736   }
4737 
4738   unsigned MatchedCases = 0;
4739   for (const auto &CaseIt : SI.cases()) {
4740     if (Constants.count(CaseIt.getCaseValue())) {
4741       ++MatchedCases;
4742       AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4743     }
4744   }
4745 
4746   // If all potential values have been matched, we will not visit the default
4747   // case.
4748   if (MatchedCases < Constants.size())
4749     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4750   return UsedAssumedInformation;
4751 }
4752 
4753 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4754   ChangeStatus Change = ChangeStatus::UNCHANGED;
4755 
4756   if (AssumedLiveBlocks.empty()) {
4757     if (isAssumedDeadInternalFunction(A))
4758       return ChangeStatus::UNCHANGED;
4759 
4760     Function *F = getAnchorScope();
4761     ToBeExploredFrom.insert(&F->getEntryBlock().front());
4762     assumeLive(A, F->getEntryBlock());
4763     Change = ChangeStatus::CHANGED;
4764   }
4765 
4766   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4767                     << getAnchorScope()->size() << "] BBs and "
4768                     << ToBeExploredFrom.size() << " exploration points and "
4769                     << KnownDeadEnds.size() << " known dead ends\n");
4770 
4771   // Copy and clear the list of instructions we need to explore from. It is
4772   // refilled with instructions the next update has to look at.
4773   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4774                                                ToBeExploredFrom.end());
4775   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4776 
4777   SmallVector<const Instruction *, 8> AliveSuccessors;
4778   while (!Worklist.empty()) {
4779     const Instruction *I = Worklist.pop_back_val();
4780     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4781 
4782     // Fast forward for uninteresting instructions. We could look for UB here
4783     // though.
4784     while (!I->isTerminator() && !isa<CallBase>(I))
4785       I = I->getNextNode();
4786 
4787     AliveSuccessors.clear();
4788 
4789     bool UsedAssumedInformation = false;
4790     switch (I->getOpcode()) {
4791     // TODO: look for (assumed) UB to backwards propagate "deadness".
4792     default:
4793       assert(I->isTerminator() &&
4794              "Expected non-terminators to be handled already!");
4795       for (const BasicBlock *SuccBB : successors(I->getParent()))
4796         AliveSuccessors.push_back(&SuccBB->front());
4797       break;
4798     case Instruction::Call:
4799       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4800                                                        *this, AliveSuccessors);
4801       break;
4802     case Instruction::Invoke:
4803       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4804                                                        *this, AliveSuccessors);
4805       break;
4806     case Instruction::Br:
4807       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4808                                                        *this, AliveSuccessors);
4809       break;
4810     case Instruction::Switch:
4811       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4812                                                        *this, AliveSuccessors);
4813       break;
4814     }
4815 
4816     if (UsedAssumedInformation) {
4817       NewToBeExploredFrom.insert(I);
4818     } else if (AliveSuccessors.empty() ||
4819                (I->isTerminator() &&
4820                 AliveSuccessors.size() < I->getNumSuccessors())) {
4821       if (KnownDeadEnds.insert(I))
4822         Change = ChangeStatus::CHANGED;
4823     }
4824 
4825     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4826                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4827                       << UsedAssumedInformation << "\n");
4828 
4829     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4830       if (!I->isTerminator()) {
4831         assert(AliveSuccessors.size() == 1 &&
4832                "Non-terminator expected to have a single successor!");
4833         Worklist.push_back(AliveSuccessor);
4834       } else {
4835         // record the assumed live edge
4836         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4837         if (AssumedLiveEdges.insert(Edge).second)
4838           Change = ChangeStatus::CHANGED;
4839         if (assumeLive(A, *AliveSuccessor->getParent()))
4840           Worklist.push_back(AliveSuccessor);
4841       }
4842     }
4843   }
4844 
4845   // Check if the content of ToBeExploredFrom changed, ignore the order.
4846   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4847       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4848         return !ToBeExploredFrom.count(I);
4849       })) {
4850     Change = ChangeStatus::CHANGED;
4851     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4852   }
4853 
4854   // If we know everything is live there is no need to query for liveness.
4855   // Instead, indicating a pessimistic fixpoint will cause the state to be
4856   // "invalid" and all queries to be answered conservatively without lookups.
4857   // To be in this state we have to (1) finished the exploration and (3) not
4858   // discovered any non-trivial dead end and (2) not ruled unreachable code
4859   // dead.
4860   if (ToBeExploredFrom.empty() &&
4861       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4862       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4863         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4864       }))
4865     return indicatePessimisticFixpoint();
4866   return Change;
4867 }
4868 
4869 /// Liveness information for a call sites.
4870 struct AAIsDeadCallSite final : AAIsDeadFunction {
4871   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4872       : AAIsDeadFunction(IRP, A) {}
4873 
4874   /// See AbstractAttribute::initialize(...).
4875   void initialize(Attributor &A) override {
4876     // TODO: Once we have call site specific value information we can provide
4877     //       call site specific liveness information and then it makes
4878     //       sense to specialize attributes for call sites instead of
4879     //       redirecting requests to the callee.
4880     llvm_unreachable("Abstract attributes for liveness are not "
4881                      "supported for call sites yet!");
4882   }
4883 
4884   /// See AbstractAttribute::updateImpl(...).
4885   ChangeStatus updateImpl(Attributor &A) override {
4886     return indicatePessimisticFixpoint();
4887   }
4888 
4889   /// See AbstractAttribute::trackStatistics()
4890   void trackStatistics() const override {}
4891 };
4892 } // namespace
4893 
4894 /// -------------------- Dereferenceable Argument Attribute --------------------
4895 
4896 namespace {
4897 struct AADereferenceableImpl : AADereferenceable {
4898   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4899       : AADereferenceable(IRP, A) {}
4900   using StateType = DerefState;
4901 
4902   /// See AbstractAttribute::initialize(...).
4903   void initialize(Attributor &A) override {
4904     Value &V = *getAssociatedValue().stripPointerCasts();
4905     SmallVector<Attribute, 4> Attrs;
4906     A.getAttrs(getIRPosition(),
4907                {Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4908                Attrs, /* IgnoreSubsumingPositions */ false);
4909     for (const Attribute &Attr : Attrs)
4910       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4911 
4912     // Ensure we initialize the non-null AA (if necessary).
4913     bool IsKnownNonNull;
4914     AA::hasAssumedIRAttr<Attribute::NonNull>(
4915         A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNonNull);
4916 
4917     bool CanBeNull, CanBeFreed;
4918     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4919         A.getDataLayout(), CanBeNull, CanBeFreed));
4920 
4921     if (Instruction *CtxI = getCtxI())
4922       followUsesInMBEC(*this, A, getState(), *CtxI);
4923   }
4924 
4925   /// See AbstractAttribute::getState()
4926   /// {
4927   StateType &getState() override { return *this; }
4928   const StateType &getState() const override { return *this; }
4929   /// }
4930 
4931   /// Helper function for collecting accessed bytes in must-be-executed-context
4932   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4933                               DerefState &State) {
4934     const Value *UseV = U->get();
4935     if (!UseV->getType()->isPointerTy())
4936       return;
4937 
4938     std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4939     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4940       return;
4941 
4942     int64_t Offset;
4943     const Value *Base = GetPointerBaseWithConstantOffset(
4944         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4945     if (Base && Base == &getAssociatedValue())
4946       State.addAccessedBytes(Offset, Loc->Size.getValue());
4947   }
4948 
4949   /// See followUsesInMBEC
4950   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4951                        AADereferenceable::StateType &State) {
4952     bool IsNonNull = false;
4953     bool TrackUse = false;
4954     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4955         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4956     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4957                       << " for instruction " << *I << "\n");
4958 
4959     addAccessedBytesForUse(A, U, I, State);
4960     State.takeKnownDerefBytesMaximum(DerefBytes);
4961     return TrackUse;
4962   }
4963 
4964   /// See AbstractAttribute::manifest(...).
4965   ChangeStatus manifest(Attributor &A) override {
4966     ChangeStatus Change = AADereferenceable::manifest(A);
4967     bool IsKnownNonNull;
4968     bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
4969         A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
4970     if (IsAssumedNonNull &&
4971         A.hasAttr(getIRPosition(), Attribute::DereferenceableOrNull)) {
4972       A.removeAttrs(getIRPosition(), {Attribute::DereferenceableOrNull});
4973       return ChangeStatus::CHANGED;
4974     }
4975     return Change;
4976   }
4977 
4978   void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
4979                             SmallVectorImpl<Attribute> &Attrs) const override {
4980     // TODO: Add *_globally support
4981     bool IsKnownNonNull;
4982     bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
4983         A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
4984     if (IsAssumedNonNull)
4985       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4986           Ctx, getAssumedDereferenceableBytes()));
4987     else
4988       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4989           Ctx, getAssumedDereferenceableBytes()));
4990   }
4991 
4992   /// See AbstractAttribute::getAsStr().
4993   const std::string getAsStr(Attributor *A) const override {
4994     if (!getAssumedDereferenceableBytes())
4995       return "unknown-dereferenceable";
4996     bool IsKnownNonNull;
4997     bool IsAssumedNonNull = false;
4998     if (A)
4999       IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
5000           *A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
5001     return std::string("dereferenceable") +
5002            (IsAssumedNonNull ? "" : "_or_null") +
5003            (isAssumedGlobal() ? "_globally" : "") + "<" +
5004            std::to_string(getKnownDereferenceableBytes()) + "-" +
5005            std::to_string(getAssumedDereferenceableBytes()) + ">" +
5006            (!A ? " [non-null is unknown]" : "");
5007   }
5008 };
5009 
5010 /// Dereferenceable attribute for a floating value.
5011 struct AADereferenceableFloating : AADereferenceableImpl {
5012   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
5013       : AADereferenceableImpl(IRP, A) {}
5014 
5015   /// See AbstractAttribute::updateImpl(...).
5016   ChangeStatus updateImpl(Attributor &A) override {
5017     bool Stripped;
5018     bool UsedAssumedInformation = false;
5019     SmallVector<AA::ValueAndContext> Values;
5020     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
5021                                       AA::AnyScope, UsedAssumedInformation)) {
5022       Values.push_back({getAssociatedValue(), getCtxI()});
5023       Stripped = false;
5024     } else {
5025       Stripped = Values.size() != 1 ||
5026                  Values.front().getValue() != &getAssociatedValue();
5027     }
5028 
5029     const DataLayout &DL = A.getDataLayout();
5030     DerefState T;
5031 
5032     auto VisitValueCB = [&](const Value &V) -> bool {
5033       unsigned IdxWidth =
5034           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
5035       APInt Offset(IdxWidth, 0);
5036       const Value *Base = stripAndAccumulateOffsets(
5037           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
5038           /* AllowNonInbounds */ true);
5039 
5040       const auto *AA = A.getAAFor<AADereferenceable>(
5041           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
5042       int64_t DerefBytes = 0;
5043       if (!AA || (!Stripped && this == AA)) {
5044         // Use IR information if we did not strip anything.
5045         // TODO: track globally.
5046         bool CanBeNull, CanBeFreed;
5047         DerefBytes =
5048             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
5049         T.GlobalState.indicatePessimisticFixpoint();
5050       } else {
5051         const DerefState &DS = AA->getState();
5052         DerefBytes = DS.DerefBytesState.getAssumed();
5053         T.GlobalState &= DS.GlobalState;
5054       }
5055 
5056       // For now we do not try to "increase" dereferenceability due to negative
5057       // indices as we first have to come up with code to deal with loops and
5058       // for overflows of the dereferenceable bytes.
5059       int64_t OffsetSExt = Offset.getSExtValue();
5060       if (OffsetSExt < 0)
5061         OffsetSExt = 0;
5062 
5063       T.takeAssumedDerefBytesMinimum(
5064           std::max(int64_t(0), DerefBytes - OffsetSExt));
5065 
5066       if (this == AA) {
5067         if (!Stripped) {
5068           // If nothing was stripped IR information is all we got.
5069           T.takeKnownDerefBytesMaximum(
5070               std::max(int64_t(0), DerefBytes - OffsetSExt));
5071           T.indicatePessimisticFixpoint();
5072         } else if (OffsetSExt > 0) {
5073           // If something was stripped but there is circular reasoning we look
5074           // for the offset. If it is positive we basically decrease the
5075           // dereferenceable bytes in a circular loop now, which will simply
5076           // drive them down to the known value in a very slow way which we
5077           // can accelerate.
5078           T.indicatePessimisticFixpoint();
5079         }
5080       }
5081 
5082       return T.isValidState();
5083     };
5084 
5085     for (const auto &VAC : Values)
5086       if (!VisitValueCB(*VAC.getValue()))
5087         return indicatePessimisticFixpoint();
5088 
5089     return clampStateAndIndicateChange(getState(), T);
5090   }
5091 
5092   /// See AbstractAttribute::trackStatistics()
5093   void trackStatistics() const override {
5094     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
5095   }
5096 };
5097 
5098 /// Dereferenceable attribute for a return value.
5099 struct AADereferenceableReturned final
5100     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
5101   using Base =
5102       AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>;
5103   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
5104       : Base(IRP, A) {}
5105 
5106   /// See AbstractAttribute::trackStatistics()
5107   void trackStatistics() const override {
5108     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
5109   }
5110 };
5111 
5112 /// Dereferenceable attribute for an argument
5113 struct AADereferenceableArgument final
5114     : AAArgumentFromCallSiteArguments<AADereferenceable,
5115                                       AADereferenceableImpl> {
5116   using Base =
5117       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
5118   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
5119       : Base(IRP, A) {}
5120 
5121   /// See AbstractAttribute::trackStatistics()
5122   void trackStatistics() const override {
5123     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
5124   }
5125 };
5126 
5127 /// Dereferenceable attribute for a call site argument.
5128 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
5129   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
5130       : AADereferenceableFloating(IRP, A) {}
5131 
5132   /// See AbstractAttribute::trackStatistics()
5133   void trackStatistics() const override {
5134     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
5135   }
5136 };
5137 
5138 /// Dereferenceable attribute deduction for a call site return value.
5139 struct AADereferenceableCallSiteReturned final
5140     : AACalleeToCallSite<AADereferenceable, AADereferenceableImpl> {
5141   using Base = AACalleeToCallSite<AADereferenceable, AADereferenceableImpl>;
5142   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
5143       : Base(IRP, A) {}
5144 
5145   /// See AbstractAttribute::trackStatistics()
5146   void trackStatistics() const override {
5147     STATS_DECLTRACK_CS_ATTR(dereferenceable);
5148   }
5149 };
5150 } // namespace
5151 
5152 // ------------------------ Align Argument Attribute ------------------------
5153 
5154 namespace {
5155 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
5156                                     Value &AssociatedValue, const Use *U,
5157                                     const Instruction *I, bool &TrackUse) {
5158   // We need to follow common pointer manipulation uses to the accesses they
5159   // feed into.
5160   if (isa<CastInst>(I)) {
5161     // Follow all but ptr2int casts.
5162     TrackUse = !isa<PtrToIntInst>(I);
5163     return 0;
5164   }
5165   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
5166     if (GEP->hasAllConstantIndices())
5167       TrackUse = true;
5168     return 0;
5169   }
5170 
5171   MaybeAlign MA;
5172   if (const auto *CB = dyn_cast<CallBase>(I)) {
5173     if (CB->isBundleOperand(U) || CB->isCallee(U))
5174       return 0;
5175 
5176     unsigned ArgNo = CB->getArgOperandNo(U);
5177     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
5178     // As long as we only use known information there is no need to track
5179     // dependences here.
5180     auto *AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
5181     if (AlignAA)
5182       MA = MaybeAlign(AlignAA->getKnownAlign());
5183   }
5184 
5185   const DataLayout &DL = A.getDataLayout();
5186   const Value *UseV = U->get();
5187   if (auto *SI = dyn_cast<StoreInst>(I)) {
5188     if (SI->getPointerOperand() == UseV)
5189       MA = SI->getAlign();
5190   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
5191     if (LI->getPointerOperand() == UseV)
5192       MA = LI->getAlign();
5193   }
5194 
5195   if (!MA || *MA <= QueryingAA.getKnownAlign())
5196     return 0;
5197 
5198   unsigned Alignment = MA->value();
5199   int64_t Offset;
5200 
5201   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
5202     if (Base == &AssociatedValue) {
5203       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
5204       // So we can say that the maximum power of two which is a divisor of
5205       // gcd(Offset, Alignment) is an alignment.
5206 
5207       uint32_t gcd = std::gcd(uint32_t(abs((int32_t)Offset)), Alignment);
5208       Alignment = llvm::bit_floor(gcd);
5209     }
5210   }
5211 
5212   return Alignment;
5213 }
5214 
5215 struct AAAlignImpl : AAAlign {
5216   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
5217 
5218   /// See AbstractAttribute::initialize(...).
5219   void initialize(Attributor &A) override {
5220     SmallVector<Attribute, 4> Attrs;
5221     A.getAttrs(getIRPosition(), {Attribute::Alignment}, Attrs);
5222     for (const Attribute &Attr : Attrs)
5223       takeKnownMaximum(Attr.getValueAsInt());
5224 
5225     Value &V = *getAssociatedValue().stripPointerCasts();
5226     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
5227 
5228     if (Instruction *CtxI = getCtxI())
5229       followUsesInMBEC(*this, A, getState(), *CtxI);
5230   }
5231 
5232   /// See AbstractAttribute::manifest(...).
5233   ChangeStatus manifest(Attributor &A) override {
5234     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
5235 
5236     // Check for users that allow alignment annotations.
5237     Value &AssociatedValue = getAssociatedValue();
5238     for (const Use &U : AssociatedValue.uses()) {
5239       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
5240         if (SI->getPointerOperand() == &AssociatedValue)
5241           if (SI->getAlign() < getAssumedAlign()) {
5242             STATS_DECLTRACK(AAAlign, Store,
5243                             "Number of times alignment added to a store");
5244             SI->setAlignment(getAssumedAlign());
5245             LoadStoreChanged = ChangeStatus::CHANGED;
5246           }
5247       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
5248         if (LI->getPointerOperand() == &AssociatedValue)
5249           if (LI->getAlign() < getAssumedAlign()) {
5250             LI->setAlignment(getAssumedAlign());
5251             STATS_DECLTRACK(AAAlign, Load,
5252                             "Number of times alignment added to a load");
5253             LoadStoreChanged = ChangeStatus::CHANGED;
5254           }
5255       }
5256     }
5257 
5258     ChangeStatus Changed = AAAlign::manifest(A);
5259 
5260     Align InheritAlign =
5261         getAssociatedValue().getPointerAlignment(A.getDataLayout());
5262     if (InheritAlign >= getAssumedAlign())
5263       return LoadStoreChanged;
5264     return Changed | LoadStoreChanged;
5265   }
5266 
5267   // TODO: Provide a helper to determine the implied ABI alignment and check in
5268   //       the existing manifest method and a new one for AAAlignImpl that value
5269   //       to avoid making the alignment explicit if it did not improve.
5270 
5271   /// See AbstractAttribute::getDeducedAttributes
5272   void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
5273                             SmallVectorImpl<Attribute> &Attrs) const override {
5274     if (getAssumedAlign() > 1)
5275       Attrs.emplace_back(
5276           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
5277   }
5278 
5279   /// See followUsesInMBEC
5280   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
5281                        AAAlign::StateType &State) {
5282     bool TrackUse = false;
5283 
5284     unsigned int KnownAlign =
5285         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
5286     State.takeKnownMaximum(KnownAlign);
5287 
5288     return TrackUse;
5289   }
5290 
5291   /// See AbstractAttribute::getAsStr().
5292   const std::string getAsStr(Attributor *A) const override {
5293     return "align<" + std::to_string(getKnownAlign().value()) + "-" +
5294            std::to_string(getAssumedAlign().value()) + ">";
5295   }
5296 };
5297 
5298 /// Align attribute for a floating value.
5299 struct AAAlignFloating : AAAlignImpl {
5300   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
5301 
5302   /// See AbstractAttribute::updateImpl(...).
5303   ChangeStatus updateImpl(Attributor &A) override {
5304     const DataLayout &DL = A.getDataLayout();
5305 
5306     bool Stripped;
5307     bool UsedAssumedInformation = false;
5308     SmallVector<AA::ValueAndContext> Values;
5309     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
5310                                       AA::AnyScope, UsedAssumedInformation)) {
5311       Values.push_back({getAssociatedValue(), getCtxI()});
5312       Stripped = false;
5313     } else {
5314       Stripped = Values.size() != 1 ||
5315                  Values.front().getValue() != &getAssociatedValue();
5316     }
5317 
5318     StateType T;
5319     auto VisitValueCB = [&](Value &V) -> bool {
5320       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
5321         return true;
5322       const auto *AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
5323                                            DepClassTy::REQUIRED);
5324       if (!AA || (!Stripped && this == AA)) {
5325         int64_t Offset;
5326         unsigned Alignment = 1;
5327         if (const Value *Base =
5328                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
5329           // TODO: Use AAAlign for the base too.
5330           Align PA = Base->getPointerAlignment(DL);
5331           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
5332           // So we can say that the maximum power of two which is a divisor of
5333           // gcd(Offset, Alignment) is an alignment.
5334 
5335           uint32_t gcd =
5336               std::gcd(uint32_t(abs((int32_t)Offset)), uint32_t(PA.value()));
5337           Alignment = llvm::bit_floor(gcd);
5338         } else {
5339           Alignment = V.getPointerAlignment(DL).value();
5340         }
5341         // Use only IR information if we did not strip anything.
5342         T.takeKnownMaximum(Alignment);
5343         T.indicatePessimisticFixpoint();
5344       } else {
5345         // Use abstract attribute information.
5346         const AAAlign::StateType &DS = AA->getState();
5347         T ^= DS;
5348       }
5349       return T.isValidState();
5350     };
5351 
5352     for (const auto &VAC : Values) {
5353       if (!VisitValueCB(*VAC.getValue()))
5354         return indicatePessimisticFixpoint();
5355     }
5356 
5357     //  TODO: If we know we visited all incoming values, thus no are assumed
5358     //  dead, we can take the known information from the state T.
5359     return clampStateAndIndicateChange(getState(), T);
5360   }
5361 
5362   /// See AbstractAttribute::trackStatistics()
5363   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
5364 };
5365 
5366 /// Align attribute for function return value.
5367 struct AAAlignReturned final
5368     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
5369   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
5370   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
5371 
5372   /// See AbstractAttribute::trackStatistics()
5373   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
5374 };
5375 
5376 /// Align attribute for function argument.
5377 struct AAAlignArgument final
5378     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
5379   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
5380   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
5381 
5382   /// See AbstractAttribute::manifest(...).
5383   ChangeStatus manifest(Attributor &A) override {
5384     // If the associated argument is involved in a must-tail call we give up
5385     // because we would need to keep the argument alignments of caller and
5386     // callee in-sync. Just does not seem worth the trouble right now.
5387     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
5388       return ChangeStatus::UNCHANGED;
5389     return Base::manifest(A);
5390   }
5391 
5392   /// See AbstractAttribute::trackStatistics()
5393   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
5394 };
5395 
5396 struct AAAlignCallSiteArgument final : AAAlignFloating {
5397   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
5398       : AAAlignFloating(IRP, A) {}
5399 
5400   /// See AbstractAttribute::manifest(...).
5401   ChangeStatus manifest(Attributor &A) override {
5402     // If the associated argument is involved in a must-tail call we give up
5403     // because we would need to keep the argument alignments of caller and
5404     // callee in-sync. Just does not seem worth the trouble right now.
5405     if (Argument *Arg = getAssociatedArgument())
5406       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
5407         return ChangeStatus::UNCHANGED;
5408     ChangeStatus Changed = AAAlignImpl::manifest(A);
5409     Align InheritAlign =
5410         getAssociatedValue().getPointerAlignment(A.getDataLayout());
5411     if (InheritAlign >= getAssumedAlign())
5412       Changed = ChangeStatus::UNCHANGED;
5413     return Changed;
5414   }
5415 
5416   /// See AbstractAttribute::updateImpl(Attributor &A).
5417   ChangeStatus updateImpl(Attributor &A) override {
5418     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
5419     if (Argument *Arg = getAssociatedArgument()) {
5420       // We only take known information from the argument
5421       // so we do not need to track a dependence.
5422       const auto *ArgAlignAA = A.getAAFor<AAAlign>(
5423           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
5424       if (ArgAlignAA)
5425         takeKnownMaximum(ArgAlignAA->getKnownAlign().value());
5426     }
5427     return Changed;
5428   }
5429 
5430   /// See AbstractAttribute::trackStatistics()
5431   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
5432 };
5433 
5434 /// Align attribute deduction for a call site return value.
5435 struct AAAlignCallSiteReturned final
5436     : AACalleeToCallSite<AAAlign, AAAlignImpl> {
5437   using Base = AACalleeToCallSite<AAAlign, AAAlignImpl>;
5438   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
5439       : Base(IRP, A) {}
5440 
5441   /// See AbstractAttribute::trackStatistics()
5442   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
5443 };
5444 } // namespace
5445 
5446 /// ------------------ Function No-Return Attribute ----------------------------
5447 namespace {
5448 struct AANoReturnImpl : public AANoReturn {
5449   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
5450 
5451   /// See AbstractAttribute::initialize(...).
5452   void initialize(Attributor &A) override {
5453     bool IsKnown;
5454     assert(!AA::hasAssumedIRAttr<Attribute::NoReturn>(
5455         A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
5456     (void)IsKnown;
5457   }
5458 
5459   /// See AbstractAttribute::getAsStr().
5460   const std::string getAsStr(Attributor *A) const override {
5461     return getAssumed() ? "noreturn" : "may-return";
5462   }
5463 
5464   /// See AbstractAttribute::updateImpl(Attributor &A).
5465   ChangeStatus updateImpl(Attributor &A) override {
5466     auto CheckForNoReturn = [](Instruction &) { return false; };
5467     bool UsedAssumedInformation = false;
5468     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
5469                                    {(unsigned)Instruction::Ret},
5470                                    UsedAssumedInformation))
5471       return indicatePessimisticFixpoint();
5472     return ChangeStatus::UNCHANGED;
5473   }
5474 };
5475 
5476 struct AANoReturnFunction final : AANoReturnImpl {
5477   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
5478       : AANoReturnImpl(IRP, A) {}
5479 
5480   /// See AbstractAttribute::trackStatistics()
5481   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
5482 };
5483 
5484 /// NoReturn attribute deduction for a call sites.
5485 struct AANoReturnCallSite final
5486     : AACalleeToCallSite<AANoReturn, AANoReturnImpl> {
5487   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
5488       : AACalleeToCallSite<AANoReturn, AANoReturnImpl>(IRP, A) {}
5489 
5490   /// See AbstractAttribute::trackStatistics()
5491   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
5492 };
5493 } // namespace
5494 
5495 /// ----------------------- Instance Info ---------------------------------
5496 
5497 namespace {
5498 /// A class to hold the state of for no-capture attributes.
5499 struct AAInstanceInfoImpl : public AAInstanceInfo {
5500   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
5501       : AAInstanceInfo(IRP, A) {}
5502 
5503   /// See AbstractAttribute::initialize(...).
5504   void initialize(Attributor &A) override {
5505     Value &V = getAssociatedValue();
5506     if (auto *C = dyn_cast<Constant>(&V)) {
5507       if (C->isThreadDependent())
5508         indicatePessimisticFixpoint();
5509       else
5510         indicateOptimisticFixpoint();
5511       return;
5512     }
5513     if (auto *CB = dyn_cast<CallBase>(&V))
5514       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
5515           !CB->mayReadFromMemory()) {
5516         indicateOptimisticFixpoint();
5517         return;
5518       }
5519     if (auto *I = dyn_cast<Instruction>(&V)) {
5520       const auto *CI =
5521           A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
5522               *I->getFunction());
5523       if (mayBeInCycle(CI, I, /* HeaderOnly */ false)) {
5524         indicatePessimisticFixpoint();
5525         return;
5526       }
5527     }
5528   }
5529 
5530   /// See AbstractAttribute::updateImpl(...).
5531   ChangeStatus updateImpl(Attributor &A) override {
5532     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5533 
5534     Value &V = getAssociatedValue();
5535     const Function *Scope = nullptr;
5536     if (auto *I = dyn_cast<Instruction>(&V))
5537       Scope = I->getFunction();
5538     if (auto *A = dyn_cast<Argument>(&V)) {
5539       Scope = A->getParent();
5540       if (!Scope->hasLocalLinkage())
5541         return Changed;
5542     }
5543     if (!Scope)
5544       return indicateOptimisticFixpoint();
5545 
5546     bool IsKnownNoRecurse;
5547     if (AA::hasAssumedIRAttr<Attribute::NoRecurse>(
5548             A, this, IRPosition::function(*Scope), DepClassTy::OPTIONAL,
5549             IsKnownNoRecurse))
5550       return Changed;
5551 
5552     auto UsePred = [&](const Use &U, bool &Follow) {
5553       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
5554       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
5555           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5556         Follow = true;
5557         return true;
5558       }
5559       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
5560           (isa<StoreInst>(UserI) &&
5561            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
5562         return true;
5563       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5564         // This check is not guaranteeing uniqueness but for now that we cannot
5565         // end up with two versions of \p U thinking it was one.
5566         auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
5567         if (!Callee || !Callee->hasLocalLinkage())
5568           return true;
5569         if (!CB->isArgOperand(&U))
5570           return false;
5571         const auto *ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
5572             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
5573             DepClassTy::OPTIONAL);
5574         if (!ArgInstanceInfoAA ||
5575             !ArgInstanceInfoAA->isAssumedUniqueForAnalysis())
5576           return false;
5577         // If this call base might reach the scope again we might forward the
5578         // argument back here. This is very conservative.
5579         if (AA::isPotentiallyReachable(
5580                 A, *CB, *Scope, *this, /* ExclusionSet */ nullptr,
5581                 [Scope](const Function &Fn) { return &Fn != Scope; }))
5582           return false;
5583         return true;
5584       }
5585       return false;
5586     };
5587 
5588     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
5589       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
5590         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
5591         if ((isa<AllocaInst>(Ptr) || isNoAliasCall(Ptr)) &&
5592             AA::isDynamicallyUnique(A, *this, *Ptr))
5593           return true;
5594       }
5595       return false;
5596     };
5597 
5598     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
5599                            DepClassTy::OPTIONAL,
5600                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
5601       return indicatePessimisticFixpoint();
5602 
5603     return Changed;
5604   }
5605 
5606   /// See AbstractState::getAsStr().
5607   const std::string getAsStr(Attributor *A) const override {
5608     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
5609   }
5610 
5611   /// See AbstractAttribute::trackStatistics()
5612   void trackStatistics() const override {}
5613 };
5614 
5615 /// InstanceInfo attribute for floating values.
5616 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
5617   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
5618       : AAInstanceInfoImpl(IRP, A) {}
5619 };
5620 
5621 /// NoCapture attribute for function arguments.
5622 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
5623   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
5624       : AAInstanceInfoFloating(IRP, A) {}
5625 };
5626 
5627 /// InstanceInfo attribute for call site arguments.
5628 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
5629   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
5630       : AAInstanceInfoImpl(IRP, A) {}
5631 
5632   /// See AbstractAttribute::updateImpl(...).
5633   ChangeStatus updateImpl(Attributor &A) override {
5634     // TODO: Once we have call site specific value information we can provide
5635     //       call site specific liveness information and then it makes
5636     //       sense to specialize attributes for call sites arguments instead of
5637     //       redirecting requests to the callee argument.
5638     Argument *Arg = getAssociatedArgument();
5639     if (!Arg)
5640       return indicatePessimisticFixpoint();
5641     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5642     auto *ArgAA =
5643         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
5644     if (!ArgAA)
5645       return indicatePessimisticFixpoint();
5646     return clampStateAndIndicateChange(getState(), ArgAA->getState());
5647   }
5648 };
5649 
5650 /// InstanceInfo attribute for function return value.
5651 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
5652   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
5653       : AAInstanceInfoImpl(IRP, A) {
5654     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5655   }
5656 
5657   /// See AbstractAttribute::initialize(...).
5658   void initialize(Attributor &A) override {
5659     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5660   }
5661 
5662   /// See AbstractAttribute::updateImpl(...).
5663   ChangeStatus updateImpl(Attributor &A) override {
5664     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5665   }
5666 };
5667 
5668 /// InstanceInfo attribute deduction for a call site return value.
5669 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
5670   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
5671       : AAInstanceInfoFloating(IRP, A) {}
5672 };
5673 } // namespace
5674 
5675 /// ----------------------- Variable Capturing ---------------------------------
5676 bool AANoCapture::isImpliedByIR(Attributor &A, const IRPosition &IRP,
5677                                 Attribute::AttrKind ImpliedAttributeKind,
5678                                 bool IgnoreSubsumingPositions) {
5679   assert(ImpliedAttributeKind == Attribute::NoCapture &&
5680          "Unexpected attribute kind");
5681   Value &V = IRP.getAssociatedValue();
5682   if (!IRP.isArgumentPosition())
5683     return V.use_empty();
5684 
5685   // You cannot "capture" null in the default address space.
5686   if (isa<UndefValue>(V) || (isa<ConstantPointerNull>(V) &&
5687                              V.getType()->getPointerAddressSpace() == 0)) {
5688     return true;
5689   }
5690 
5691   if (A.hasAttr(IRP, {Attribute::NoCapture},
5692                 /* IgnoreSubsumingPositions */ true, Attribute::NoCapture))
5693     return true;
5694 
5695   if (IRP.getPositionKind() == IRP_CALL_SITE_ARGUMENT)
5696     if (Argument *Arg = IRP.getAssociatedArgument())
5697       if (A.hasAttr(IRPosition::argument(*Arg),
5698                     {Attribute::NoCapture, Attribute::ByVal},
5699                     /* IgnoreSubsumingPositions */ true)) {
5700         A.manifestAttrs(IRP,
5701                         Attribute::get(V.getContext(), Attribute::NoCapture));
5702         return true;
5703       }
5704 
5705   if (const Function *F = IRP.getAssociatedFunction()) {
5706     // Check what state the associated function can actually capture.
5707     AANoCapture::StateType State;
5708     determineFunctionCaptureCapabilities(IRP, *F, State);
5709     if (State.isKnown(NO_CAPTURE)) {
5710       A.manifestAttrs(IRP,
5711                       Attribute::get(V.getContext(), Attribute::NoCapture));
5712       return true;
5713     }
5714   }
5715 
5716   return false;
5717 }
5718 
5719 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5720 /// depending on the ability of the function associated with \p IRP to capture
5721 /// state in memory and through "returning/throwing", respectively.
5722 void AANoCapture::determineFunctionCaptureCapabilities(const IRPosition &IRP,
5723                                                        const Function &F,
5724                                                        BitIntegerState &State) {
5725   // TODO: Once we have memory behavior attributes we should use them here.
5726 
5727   // If we know we cannot communicate or write to memory, we do not care about
5728   // ptr2int anymore.
5729   bool ReadOnly = F.onlyReadsMemory();
5730   bool NoThrow = F.doesNotThrow();
5731   bool IsVoidReturn = F.getReturnType()->isVoidTy();
5732   if (ReadOnly && NoThrow && IsVoidReturn) {
5733     State.addKnownBits(NO_CAPTURE);
5734     return;
5735   }
5736 
5737   // A function cannot capture state in memory if it only reads memory, it can
5738   // however return/throw state and the state might be influenced by the
5739   // pointer value, e.g., loading from a returned pointer might reveal a bit.
5740   if (ReadOnly)
5741     State.addKnownBits(NOT_CAPTURED_IN_MEM);
5742 
5743   // A function cannot communicate state back if it does not through
5744   // exceptions and doesn not return values.
5745   if (NoThrow && IsVoidReturn)
5746     State.addKnownBits(NOT_CAPTURED_IN_RET);
5747 
5748   // Check existing "returned" attributes.
5749   int ArgNo = IRP.getCalleeArgNo();
5750   if (!NoThrow || ArgNo < 0 ||
5751       !F.getAttributes().hasAttrSomewhere(Attribute::Returned))
5752     return;
5753 
5754   for (unsigned U = 0, E = F.arg_size(); U < E; ++U)
5755     if (F.hasParamAttribute(U, Attribute::Returned)) {
5756       if (U == unsigned(ArgNo))
5757         State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5758       else if (ReadOnly)
5759         State.addKnownBits(NO_CAPTURE);
5760       else
5761         State.addKnownBits(NOT_CAPTURED_IN_RET);
5762       break;
5763     }
5764 }
5765 
5766 namespace {
5767 /// A class to hold the state of for no-capture attributes.
5768 struct AANoCaptureImpl : public AANoCapture {
5769   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5770 
5771   /// See AbstractAttribute::initialize(...).
5772   void initialize(Attributor &A) override {
5773     bool IsKnown;
5774     assert(!AA::hasAssumedIRAttr<Attribute::NoCapture>(
5775         A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
5776     (void)IsKnown;
5777   }
5778 
5779   /// See AbstractAttribute::updateImpl(...).
5780   ChangeStatus updateImpl(Attributor &A) override;
5781 
5782   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
5783   void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
5784                             SmallVectorImpl<Attribute> &Attrs) const override {
5785     if (!isAssumedNoCaptureMaybeReturned())
5786       return;
5787 
5788     if (isArgumentPosition()) {
5789       if (isAssumedNoCapture())
5790         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5791       else if (ManifestInternal)
5792         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5793     }
5794   }
5795 
5796   /// See AbstractState::getAsStr().
5797   const std::string getAsStr(Attributor *A) const override {
5798     if (isKnownNoCapture())
5799       return "known not-captured";
5800     if (isAssumedNoCapture())
5801       return "assumed not-captured";
5802     if (isKnownNoCaptureMaybeReturned())
5803       return "known not-captured-maybe-returned";
5804     if (isAssumedNoCaptureMaybeReturned())
5805       return "assumed not-captured-maybe-returned";
5806     return "assumed-captured";
5807   }
5808 
5809   /// Check the use \p U and update \p State accordingly. Return true if we
5810   /// should continue to update the state.
5811   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5812                 bool &Follow) {
5813     Instruction *UInst = cast<Instruction>(U.getUser());
5814     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5815                       << *UInst << "\n");
5816 
5817     // Deal with ptr2int by following uses.
5818     if (isa<PtrToIntInst>(UInst)) {
5819       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5820       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5821                           /* Return */ true);
5822     }
5823 
5824     // For stores we already checked if we can follow them, if they make it
5825     // here we give up.
5826     if (isa<StoreInst>(UInst))
5827       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5828                           /* Return */ true);
5829 
5830     // Explicitly catch return instructions.
5831     if (isa<ReturnInst>(UInst)) {
5832       if (UInst->getFunction() == getAnchorScope())
5833         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5834                             /* Return */ true);
5835       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5836                           /* Return */ true);
5837     }
5838 
5839     // For now we only use special logic for call sites. However, the tracker
5840     // itself knows about a lot of other non-capturing cases already.
5841     auto *CB = dyn_cast<CallBase>(UInst);
5842     if (!CB || !CB->isArgOperand(&U))
5843       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5844                           /* Return */ true);
5845 
5846     unsigned ArgNo = CB->getArgOperandNo(&U);
5847     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5848     // If we have a abstract no-capture attribute for the argument we can use
5849     // it to justify a non-capture attribute here. This allows recursion!
5850     bool IsKnownNoCapture;
5851     const AANoCapture *ArgNoCaptureAA = nullptr;
5852     bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
5853         A, this, CSArgPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
5854         &ArgNoCaptureAA);
5855     if (IsAssumedNoCapture)
5856       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5857                           /* Return */ false);
5858     if (ArgNoCaptureAA && ArgNoCaptureAA->isAssumedNoCaptureMaybeReturned()) {
5859       Follow = true;
5860       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5861                           /* Return */ false);
5862     }
5863 
5864     // Lastly, we could not find a reason no-capture can be assumed so we don't.
5865     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5866                         /* Return */ true);
5867   }
5868 
5869   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5870   /// \p CapturedInRet, then return true if we should continue updating the
5871   /// state.
5872   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5873                            bool CapturedInInt, bool CapturedInRet) {
5874     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5875                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5876     if (CapturedInMem)
5877       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5878     if (CapturedInInt)
5879       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5880     if (CapturedInRet)
5881       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5882     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5883   }
5884 };
5885 
5886 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5887   const IRPosition &IRP = getIRPosition();
5888   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5889                                   : &IRP.getAssociatedValue();
5890   if (!V)
5891     return indicatePessimisticFixpoint();
5892 
5893   const Function *F =
5894       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5895   assert(F && "Expected a function!");
5896   const IRPosition &FnPos = IRPosition::function(*F);
5897 
5898   AANoCapture::StateType T;
5899 
5900   // Readonly means we cannot capture through memory.
5901   bool IsKnown;
5902   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5903     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5904     if (IsKnown)
5905       addKnownBits(NOT_CAPTURED_IN_MEM);
5906   }
5907 
5908   // Make sure all returned values are different than the underlying value.
5909   // TODO: we could do this in a more sophisticated way inside
5910   //       AAReturnedValues, e.g., track all values that escape through returns
5911   //       directly somehow.
5912   auto CheckReturnedArgs = [&](bool &UsedAssumedInformation) {
5913     SmallVector<AA::ValueAndContext> Values;
5914     if (!A.getAssumedSimplifiedValues(IRPosition::returned(*F), this, Values,
5915                                       AA::ValueScope::Intraprocedural,
5916                                       UsedAssumedInformation))
5917       return false;
5918     bool SeenConstant = false;
5919     for (const AA::ValueAndContext &VAC : Values) {
5920       if (isa<Constant>(VAC.getValue())) {
5921         if (SeenConstant)
5922           return false;
5923         SeenConstant = true;
5924       } else if (!isa<Argument>(VAC.getValue()) ||
5925                  VAC.getValue() == getAssociatedArgument())
5926         return false;
5927     }
5928     return true;
5929   };
5930 
5931   bool IsKnownNoUnwind;
5932   if (AA::hasAssumedIRAttr<Attribute::NoUnwind>(
5933           A, this, FnPos, DepClassTy::OPTIONAL, IsKnownNoUnwind)) {
5934     bool IsVoidTy = F->getReturnType()->isVoidTy();
5935     bool UsedAssumedInformation = false;
5936     if (IsVoidTy || CheckReturnedArgs(UsedAssumedInformation)) {
5937       T.addKnownBits(NOT_CAPTURED_IN_RET);
5938       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5939         return ChangeStatus::UNCHANGED;
5940       if (IsKnownNoUnwind && (IsVoidTy || !UsedAssumedInformation)) {
5941         addKnownBits(NOT_CAPTURED_IN_RET);
5942         if (isKnown(NOT_CAPTURED_IN_MEM))
5943           return indicateOptimisticFixpoint();
5944       }
5945     }
5946   }
5947 
5948   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5949     const auto *DerefAA = A.getAAFor<AADereferenceable>(
5950         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5951     return DerefAA && DerefAA->getAssumedDereferenceableBytes();
5952   };
5953 
5954   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5955     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5956     case UseCaptureKind::NO_CAPTURE:
5957       return true;
5958     case UseCaptureKind::MAY_CAPTURE:
5959       return checkUse(A, T, U, Follow);
5960     case UseCaptureKind::PASSTHROUGH:
5961       Follow = true;
5962       return true;
5963     }
5964     llvm_unreachable("Unexpected use capture kind!");
5965   };
5966 
5967   if (!A.checkForAllUses(UseCheck, *this, *V))
5968     return indicatePessimisticFixpoint();
5969 
5970   AANoCapture::StateType &S = getState();
5971   auto Assumed = S.getAssumed();
5972   S.intersectAssumedBits(T.getAssumed());
5973   if (!isAssumedNoCaptureMaybeReturned())
5974     return indicatePessimisticFixpoint();
5975   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5976                                    : ChangeStatus::CHANGED;
5977 }
5978 
5979 /// NoCapture attribute for function arguments.
5980 struct AANoCaptureArgument final : AANoCaptureImpl {
5981   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5982       : AANoCaptureImpl(IRP, A) {}
5983 
5984   /// See AbstractAttribute::trackStatistics()
5985   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5986 };
5987 
5988 /// NoCapture attribute for call site arguments.
5989 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5990   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5991       : AANoCaptureImpl(IRP, A) {}
5992 
5993   /// See AbstractAttribute::updateImpl(...).
5994   ChangeStatus updateImpl(Attributor &A) override {
5995     // TODO: Once we have call site specific value information we can provide
5996     //       call site specific liveness information and then it makes
5997     //       sense to specialize attributes for call sites arguments instead of
5998     //       redirecting requests to the callee argument.
5999     Argument *Arg = getAssociatedArgument();
6000     if (!Arg)
6001       return indicatePessimisticFixpoint();
6002     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6003     bool IsKnownNoCapture;
6004     const AANoCapture *ArgAA = nullptr;
6005     if (AA::hasAssumedIRAttr<Attribute::NoCapture>(
6006             A, this, ArgPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
6007             &ArgAA))
6008       return ChangeStatus::UNCHANGED;
6009     if (!ArgAA || !ArgAA->isAssumedNoCaptureMaybeReturned())
6010       return indicatePessimisticFixpoint();
6011     return clampStateAndIndicateChange(getState(), ArgAA->getState());
6012   }
6013 
6014   /// See AbstractAttribute::trackStatistics()
6015   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
6016 };
6017 
6018 /// NoCapture attribute for floating values.
6019 struct AANoCaptureFloating final : AANoCaptureImpl {
6020   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
6021       : AANoCaptureImpl(IRP, A) {}
6022 
6023   /// See AbstractAttribute::trackStatistics()
6024   void trackStatistics() const override {
6025     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
6026   }
6027 };
6028 
6029 /// NoCapture attribute for function return value.
6030 struct AANoCaptureReturned final : AANoCaptureImpl {
6031   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
6032       : AANoCaptureImpl(IRP, A) {
6033     llvm_unreachable("NoCapture is not applicable to function returns!");
6034   }
6035 
6036   /// See AbstractAttribute::initialize(...).
6037   void initialize(Attributor &A) override {
6038     llvm_unreachable("NoCapture is not applicable to function returns!");
6039   }
6040 
6041   /// See AbstractAttribute::updateImpl(...).
6042   ChangeStatus updateImpl(Attributor &A) override {
6043     llvm_unreachable("NoCapture is not applicable to function returns!");
6044   }
6045 
6046   /// See AbstractAttribute::trackStatistics()
6047   void trackStatistics() const override {}
6048 };
6049 
6050 /// NoCapture attribute deduction for a call site return value.
6051 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
6052   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
6053       : AANoCaptureImpl(IRP, A) {}
6054 
6055   /// See AbstractAttribute::initialize(...).
6056   void initialize(Attributor &A) override {
6057     const Function *F = getAnchorScope();
6058     // Check what state the associated function can actually capture.
6059     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
6060   }
6061 
6062   /// See AbstractAttribute::trackStatistics()
6063   void trackStatistics() const override {
6064     STATS_DECLTRACK_CSRET_ATTR(nocapture)
6065   }
6066 };
6067 } // namespace
6068 
6069 /// ------------------ Value Simplify Attribute ----------------------------
6070 
6071 bool ValueSimplifyStateType::unionAssumed(std::optional<Value *> Other) {
6072   // FIXME: Add a typecast support.
6073   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
6074       SimplifiedAssociatedValue, Other, Ty);
6075   if (SimplifiedAssociatedValue == std::optional<Value *>(nullptr))
6076     return false;
6077 
6078   LLVM_DEBUG({
6079     if (SimplifiedAssociatedValue)
6080       dbgs() << "[ValueSimplify] is assumed to be "
6081              << **SimplifiedAssociatedValue << "\n";
6082     else
6083       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
6084   });
6085   return true;
6086 }
6087 
6088 namespace {
6089 struct AAValueSimplifyImpl : AAValueSimplify {
6090   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
6091       : AAValueSimplify(IRP, A) {}
6092 
6093   /// See AbstractAttribute::initialize(...).
6094   void initialize(Attributor &A) override {
6095     if (getAssociatedValue().getType()->isVoidTy())
6096       indicatePessimisticFixpoint();
6097     if (A.hasSimplificationCallback(getIRPosition()))
6098       indicatePessimisticFixpoint();
6099   }
6100 
6101   /// See AbstractAttribute::getAsStr().
6102   const std::string getAsStr(Attributor *A) const override {
6103     LLVM_DEBUG({
6104       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
6105       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
6106         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
6107     });
6108     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
6109                           : "not-simple";
6110   }
6111 
6112   /// See AbstractAttribute::trackStatistics()
6113   void trackStatistics() const override {}
6114 
6115   /// See AAValueSimplify::getAssumedSimplifiedValue()
6116   std::optional<Value *>
6117   getAssumedSimplifiedValue(Attributor &A) const override {
6118     return SimplifiedAssociatedValue;
6119   }
6120 
6121   /// Ensure the return value is \p V with type \p Ty, if not possible return
6122   /// nullptr. If \p Check is true we will only verify such an operation would
6123   /// suceed and return a non-nullptr value if that is the case. No IR is
6124   /// generated or modified.
6125   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
6126                            bool Check) {
6127     if (auto *TypedV = AA::getWithType(V, Ty))
6128       return TypedV;
6129     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
6130       return Check ? &V
6131                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
6132                                                                       "", CtxI);
6133     return nullptr;
6134   }
6135 
6136   /// Reproduce \p I with type \p Ty or return nullptr if that is not posisble.
6137   /// If \p Check is true we will only verify such an operation would suceed and
6138   /// return a non-nullptr value if that is the case. No IR is generated or
6139   /// modified.
6140   static Value *reproduceInst(Attributor &A,
6141                               const AbstractAttribute &QueryingAA,
6142                               Instruction &I, Type &Ty, Instruction *CtxI,
6143                               bool Check, ValueToValueMapTy &VMap) {
6144     assert(CtxI && "Cannot reproduce an instruction without context!");
6145     if (Check && (I.mayReadFromMemory() ||
6146                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
6147                                                 /* TLI */ nullptr)))
6148       return nullptr;
6149     for (Value *Op : I.operands()) {
6150       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
6151       if (!NewOp) {
6152         assert(Check && "Manifest of new value unexpectedly failed!");
6153         return nullptr;
6154       }
6155       if (!Check)
6156         VMap[Op] = NewOp;
6157     }
6158     if (Check)
6159       return &I;
6160 
6161     Instruction *CloneI = I.clone();
6162     // TODO: Try to salvage debug information here.
6163     CloneI->setDebugLoc(DebugLoc());
6164     VMap[&I] = CloneI;
6165     CloneI->insertBefore(CtxI);
6166     RemapInstruction(CloneI, VMap);
6167     return CloneI;
6168   }
6169 
6170   /// Reproduce \p V with type \p Ty or return nullptr if that is not posisble.
6171   /// If \p Check is true we will only verify such an operation would suceed and
6172   /// return a non-nullptr value if that is the case. No IR is generated or
6173   /// modified.
6174   static Value *reproduceValue(Attributor &A,
6175                                const AbstractAttribute &QueryingAA, Value &V,
6176                                Type &Ty, Instruction *CtxI, bool Check,
6177                                ValueToValueMapTy &VMap) {
6178     if (const auto &NewV = VMap.lookup(&V))
6179       return NewV;
6180     bool UsedAssumedInformation = false;
6181     std::optional<Value *> SimpleV = A.getAssumedSimplified(
6182         V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
6183     if (!SimpleV.has_value())
6184       return PoisonValue::get(&Ty);
6185     Value *EffectiveV = &V;
6186     if (*SimpleV)
6187       EffectiveV = *SimpleV;
6188     if (auto *C = dyn_cast<Constant>(EffectiveV))
6189       return C;
6190     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
6191                                       A.getInfoCache()))
6192       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
6193     if (auto *I = dyn_cast<Instruction>(EffectiveV))
6194       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
6195         return ensureType(A, *NewV, Ty, CtxI, Check);
6196     return nullptr;
6197   }
6198 
6199   /// Return a value we can use as replacement for the associated one, or
6200   /// nullptr if we don't have one that makes sense.
6201   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
6202     Value *NewV = SimplifiedAssociatedValue
6203                       ? *SimplifiedAssociatedValue
6204                       : UndefValue::get(getAssociatedType());
6205     if (NewV && NewV != &getAssociatedValue()) {
6206       ValueToValueMapTy VMap;
6207       // First verify we can reprduce the value with the required type at the
6208       // context location before we actually start modifying the IR.
6209       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
6210                          /* CheckOnly */ true, VMap))
6211         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
6212                               /* CheckOnly */ false, VMap);
6213     }
6214     return nullptr;
6215   }
6216 
6217   /// Helper function for querying AAValueSimplify and updating candidate.
6218   /// \param IRP The value position we are trying to unify with SimplifiedValue
6219   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
6220                       const IRPosition &IRP, bool Simplify = true) {
6221     bool UsedAssumedInformation = false;
6222     std::optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
6223     if (Simplify)
6224       QueryingValueSimplified = A.getAssumedSimplified(
6225           IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
6226     return unionAssumed(QueryingValueSimplified);
6227   }
6228 
6229   /// Returns a candidate is found or not
6230   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
6231     if (!getAssociatedValue().getType()->isIntegerTy())
6232       return false;
6233 
6234     // This will also pass the call base context.
6235     const auto *AA =
6236         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
6237     if (!AA)
6238       return false;
6239 
6240     std::optional<Constant *> COpt = AA->getAssumedConstant(A);
6241 
6242     if (!COpt) {
6243       SimplifiedAssociatedValue = std::nullopt;
6244       A.recordDependence(*AA, *this, DepClassTy::OPTIONAL);
6245       return true;
6246     }
6247     if (auto *C = *COpt) {
6248       SimplifiedAssociatedValue = C;
6249       A.recordDependence(*AA, *this, DepClassTy::OPTIONAL);
6250       return true;
6251     }
6252     return false;
6253   }
6254 
6255   bool askSimplifiedValueForOtherAAs(Attributor &A) {
6256     if (askSimplifiedValueFor<AAValueConstantRange>(A))
6257       return true;
6258     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
6259       return true;
6260     return false;
6261   }
6262 
6263   /// See AbstractAttribute::manifest(...).
6264   ChangeStatus manifest(Attributor &A) override {
6265     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6266     for (auto &U : getAssociatedValue().uses()) {
6267       // Check if we need to adjust the insertion point to make sure the IR is
6268       // valid.
6269       Instruction *IP = dyn_cast<Instruction>(U.getUser());
6270       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
6271         IP = PHI->getIncomingBlock(U)->getTerminator();
6272       if (auto *NewV = manifestReplacementValue(A, IP)) {
6273         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
6274                           << " -> " << *NewV << " :: " << *this << "\n");
6275         if (A.changeUseAfterManifest(U, *NewV))
6276           Changed = ChangeStatus::CHANGED;
6277       }
6278     }
6279 
6280     return Changed | AAValueSimplify::manifest(A);
6281   }
6282 
6283   /// See AbstractState::indicatePessimisticFixpoint(...).
6284   ChangeStatus indicatePessimisticFixpoint() override {
6285     SimplifiedAssociatedValue = &getAssociatedValue();
6286     return AAValueSimplify::indicatePessimisticFixpoint();
6287   }
6288 };
6289 
6290 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
6291   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
6292       : AAValueSimplifyImpl(IRP, A) {}
6293 
6294   void initialize(Attributor &A) override {
6295     AAValueSimplifyImpl::initialize(A);
6296     if (A.hasAttr(getIRPosition(),
6297                   {Attribute::InAlloca, Attribute::Preallocated,
6298                    Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
6299                   /* IgnoreSubsumingPositions */ true))
6300       indicatePessimisticFixpoint();
6301   }
6302 
6303   /// See AbstractAttribute::updateImpl(...).
6304   ChangeStatus updateImpl(Attributor &A) override {
6305     // Byval is only replacable if it is readonly otherwise we would write into
6306     // the replaced value and not the copy that byval creates implicitly.
6307     Argument *Arg = getAssociatedArgument();
6308     if (Arg->hasByValAttr()) {
6309       // TODO: We probably need to verify synchronization is not an issue, e.g.,
6310       //       there is no race by not copying a constant byval.
6311       bool IsKnown;
6312       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
6313         return indicatePessimisticFixpoint();
6314     }
6315 
6316     auto Before = SimplifiedAssociatedValue;
6317 
6318     auto PredForCallSite = [&](AbstractCallSite ACS) {
6319       const IRPosition &ACSArgPos =
6320           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
6321       // Check if a coresponding argument was found or if it is on not
6322       // associated (which can happen for callback calls).
6323       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6324         return false;
6325 
6326       // Simplify the argument operand explicitly and check if the result is
6327       // valid in the current scope. This avoids refering to simplified values
6328       // in other functions, e.g., we don't want to say a an argument in a
6329       // static function is actually an argument in a different function.
6330       bool UsedAssumedInformation = false;
6331       std::optional<Constant *> SimpleArgOp =
6332           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
6333       if (!SimpleArgOp)
6334         return true;
6335       if (!*SimpleArgOp)
6336         return false;
6337       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
6338         return false;
6339       return unionAssumed(*SimpleArgOp);
6340     };
6341 
6342     // Generate a answer specific to a call site context.
6343     bool Success;
6344     bool UsedAssumedInformation = false;
6345     if (hasCallBaseContext() &&
6346         getCallBaseContext()->getCalledOperand() == Arg->getParent())
6347       Success = PredForCallSite(
6348           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
6349     else
6350       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
6351                                        UsedAssumedInformation);
6352 
6353     if (!Success)
6354       if (!askSimplifiedValueForOtherAAs(A))
6355         return indicatePessimisticFixpoint();
6356 
6357     // If a candidate was found in this update, return CHANGED.
6358     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6359                                                : ChangeStatus ::CHANGED;
6360   }
6361 
6362   /// See AbstractAttribute::trackStatistics()
6363   void trackStatistics() const override {
6364     STATS_DECLTRACK_ARG_ATTR(value_simplify)
6365   }
6366 };
6367 
6368 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
6369   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
6370       : AAValueSimplifyImpl(IRP, A) {}
6371 
6372   /// See AAValueSimplify::getAssumedSimplifiedValue()
6373   std::optional<Value *>
6374   getAssumedSimplifiedValue(Attributor &A) const override {
6375     if (!isValidState())
6376       return nullptr;
6377     return SimplifiedAssociatedValue;
6378   }
6379 
6380   /// See AbstractAttribute::updateImpl(...).
6381   ChangeStatus updateImpl(Attributor &A) override {
6382     auto Before = SimplifiedAssociatedValue;
6383 
6384     auto ReturnInstCB = [&](Instruction &I) {
6385       auto &RI = cast<ReturnInst>(I);
6386       return checkAndUpdate(
6387           A, *this,
6388           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
6389     };
6390 
6391     bool UsedAssumedInformation = false;
6392     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
6393                                    UsedAssumedInformation))
6394       if (!askSimplifiedValueForOtherAAs(A))
6395         return indicatePessimisticFixpoint();
6396 
6397     // If a candidate was found in this update, return CHANGED.
6398     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6399                                                : ChangeStatus ::CHANGED;
6400   }
6401 
6402   ChangeStatus manifest(Attributor &A) override {
6403     // We queried AAValueSimplify for the returned values so they will be
6404     // replaced if a simplified form was found. Nothing to do here.
6405     return ChangeStatus::UNCHANGED;
6406   }
6407 
6408   /// See AbstractAttribute::trackStatistics()
6409   void trackStatistics() const override {
6410     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
6411   }
6412 };
6413 
6414 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
6415   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
6416       : AAValueSimplifyImpl(IRP, A) {}
6417 
6418   /// See AbstractAttribute::initialize(...).
6419   void initialize(Attributor &A) override {
6420     AAValueSimplifyImpl::initialize(A);
6421     Value &V = getAnchorValue();
6422 
6423     // TODO: add other stuffs
6424     if (isa<Constant>(V))
6425       indicatePessimisticFixpoint();
6426   }
6427 
6428   /// See AbstractAttribute::updateImpl(...).
6429   ChangeStatus updateImpl(Attributor &A) override {
6430     auto Before = SimplifiedAssociatedValue;
6431     if (!askSimplifiedValueForOtherAAs(A))
6432       return indicatePessimisticFixpoint();
6433 
6434     // If a candidate was found in this update, return CHANGED.
6435     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6436                                                : ChangeStatus ::CHANGED;
6437   }
6438 
6439   /// See AbstractAttribute::trackStatistics()
6440   void trackStatistics() const override {
6441     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
6442   }
6443 };
6444 
6445 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
6446   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
6447       : AAValueSimplifyImpl(IRP, A) {}
6448 
6449   /// See AbstractAttribute::initialize(...).
6450   void initialize(Attributor &A) override {
6451     SimplifiedAssociatedValue = nullptr;
6452     indicateOptimisticFixpoint();
6453   }
6454   /// See AbstractAttribute::initialize(...).
6455   ChangeStatus updateImpl(Attributor &A) override {
6456     llvm_unreachable(
6457         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
6458   }
6459   /// See AbstractAttribute::trackStatistics()
6460   void trackStatistics() const override {
6461     STATS_DECLTRACK_FN_ATTR(value_simplify)
6462   }
6463 };
6464 
6465 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
6466   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
6467       : AAValueSimplifyFunction(IRP, A) {}
6468   /// See AbstractAttribute::trackStatistics()
6469   void trackStatistics() const override {
6470     STATS_DECLTRACK_CS_ATTR(value_simplify)
6471   }
6472 };
6473 
6474 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
6475   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
6476       : AAValueSimplifyImpl(IRP, A) {}
6477 
6478   void initialize(Attributor &A) override {
6479     AAValueSimplifyImpl::initialize(A);
6480     Function *Fn = getAssociatedFunction();
6481     assert(Fn && "Did expect an associted function");
6482     for (Argument &Arg : Fn->args()) {
6483       if (Arg.hasReturnedAttr()) {
6484         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
6485                                                  Arg.getArgNo());
6486         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
6487             checkAndUpdate(A, *this, IRP))
6488           indicateOptimisticFixpoint();
6489         else
6490           indicatePessimisticFixpoint();
6491         return;
6492       }
6493     }
6494   }
6495 
6496   /// See AbstractAttribute::updateImpl(...).
6497   ChangeStatus updateImpl(Attributor &A) override {
6498     return indicatePessimisticFixpoint();
6499   }
6500 
6501   void trackStatistics() const override {
6502     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6503   }
6504 };
6505 
6506 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6507   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6508       : AAValueSimplifyFloating(IRP, A) {}
6509 
6510   /// See AbstractAttribute::manifest(...).
6511   ChangeStatus manifest(Attributor &A) override {
6512     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6513     // TODO: We should avoid simplification duplication to begin with.
6514     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
6515         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
6516     if (FloatAA && FloatAA->getState().isValidState())
6517       return Changed;
6518 
6519     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6520       Use &U = cast<CallBase>(&getAnchorValue())
6521                    ->getArgOperandUse(getCallSiteArgNo());
6522       if (A.changeUseAfterManifest(U, *NewV))
6523         Changed = ChangeStatus::CHANGED;
6524     }
6525 
6526     return Changed | AAValueSimplify::manifest(A);
6527   }
6528 
6529   void trackStatistics() const override {
6530     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6531   }
6532 };
6533 } // namespace
6534 
6535 /// ----------------------- Heap-To-Stack Conversion ---------------------------
6536 namespace {
6537 struct AAHeapToStackFunction final : public AAHeapToStack {
6538 
6539   struct AllocationInfo {
6540     /// The call that allocates the memory.
6541     CallBase *const CB;
6542 
6543     /// The library function id for the allocation.
6544     LibFunc LibraryFunctionId = NotLibFunc;
6545 
6546     /// The status wrt. a rewrite.
6547     enum {
6548       STACK_DUE_TO_USE,
6549       STACK_DUE_TO_FREE,
6550       INVALID,
6551     } Status = STACK_DUE_TO_USE;
6552 
6553     /// Flag to indicate if we encountered a use that might free this allocation
6554     /// but which is not in the deallocation infos.
6555     bool HasPotentiallyFreeingUnknownUses = false;
6556 
6557     /// Flag to indicate that we should place the new alloca in the function
6558     /// entry block rather than where the call site (CB) is.
6559     bool MoveAllocaIntoEntry = true;
6560 
6561     /// The set of free calls that use this allocation.
6562     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6563   };
6564 
6565   struct DeallocationInfo {
6566     /// The call that deallocates the memory.
6567     CallBase *const CB;
6568     /// The value freed by the call.
6569     Value *FreedOp;
6570 
6571     /// Flag to indicate if we don't know all objects this deallocation might
6572     /// free.
6573     bool MightFreeUnknownObjects = false;
6574 
6575     /// The set of allocation calls that are potentially freed.
6576     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6577   };
6578 
6579   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6580       : AAHeapToStack(IRP, A) {}
6581 
6582   ~AAHeapToStackFunction() {
6583     // Ensure we call the destructor so we release any memory allocated in the
6584     // sets.
6585     for (auto &It : AllocationInfos)
6586       It.second->~AllocationInfo();
6587     for (auto &It : DeallocationInfos)
6588       It.second->~DeallocationInfo();
6589   }
6590 
6591   void initialize(Attributor &A) override {
6592     AAHeapToStack::initialize(A);
6593 
6594     const Function *F = getAnchorScope();
6595     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6596 
6597     auto AllocationIdentifierCB = [&](Instruction &I) {
6598       CallBase *CB = dyn_cast<CallBase>(&I);
6599       if (!CB)
6600         return true;
6601       if (Value *FreedOp = getFreedOperand(CB, TLI)) {
6602         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB, FreedOp};
6603         return true;
6604       }
6605       // To do heap to stack, we need to know that the allocation itself is
6606       // removable once uses are rewritten, and that we can initialize the
6607       // alloca to the same pattern as the original allocation result.
6608       if (isRemovableAlloc(CB, TLI)) {
6609         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6610         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6611           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6612           AllocationInfos[CB] = AI;
6613           if (TLI)
6614             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6615         }
6616       }
6617       return true;
6618     };
6619 
6620     bool UsedAssumedInformation = false;
6621     bool Success = A.checkForAllCallLikeInstructions(
6622         AllocationIdentifierCB, *this, UsedAssumedInformation,
6623         /* CheckBBLivenessOnly */ false,
6624         /* CheckPotentiallyDead */ true);
6625     (void)Success;
6626     assert(Success && "Did not expect the call base visit callback to fail!");
6627 
6628     Attributor::SimplifictionCallbackTy SCB =
6629         [](const IRPosition &, const AbstractAttribute *,
6630            bool &) -> std::optional<Value *> { return nullptr; };
6631     for (const auto &It : AllocationInfos)
6632       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6633                                        SCB);
6634     for (const auto &It : DeallocationInfos)
6635       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6636                                        SCB);
6637   }
6638 
6639   const std::string getAsStr(Attributor *A) const override {
6640     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6641     for (const auto &It : AllocationInfos) {
6642       if (It.second->Status == AllocationInfo::INVALID)
6643         ++NumInvalidMallocs;
6644       else
6645         ++NumH2SMallocs;
6646     }
6647     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6648            std::to_string(NumInvalidMallocs);
6649   }
6650 
6651   /// See AbstractAttribute::trackStatistics().
6652   void trackStatistics() const override {
6653     STATS_DECL(
6654         MallocCalls, Function,
6655         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6656     for (const auto &It : AllocationInfos)
6657       if (It.second->Status != AllocationInfo::INVALID)
6658         ++BUILD_STAT_NAME(MallocCalls, Function);
6659   }
6660 
6661   bool isAssumedHeapToStack(const CallBase &CB) const override {
6662     if (isValidState())
6663       if (AllocationInfo *AI =
6664               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6665         return AI->Status != AllocationInfo::INVALID;
6666     return false;
6667   }
6668 
6669   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6670     if (!isValidState())
6671       return false;
6672 
6673     for (const auto &It : AllocationInfos) {
6674       AllocationInfo &AI = *It.second;
6675       if (AI.Status == AllocationInfo::INVALID)
6676         continue;
6677 
6678       if (AI.PotentialFreeCalls.count(&CB))
6679         return true;
6680     }
6681 
6682     return false;
6683   }
6684 
6685   ChangeStatus manifest(Attributor &A) override {
6686     assert(getState().isValidState() &&
6687            "Attempted to manifest an invalid state!");
6688 
6689     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6690     Function *F = getAnchorScope();
6691     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6692 
6693     for (auto &It : AllocationInfos) {
6694       AllocationInfo &AI = *It.second;
6695       if (AI.Status == AllocationInfo::INVALID)
6696         continue;
6697 
6698       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6699         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6700         A.deleteAfterManifest(*FreeCall);
6701         HasChanged = ChangeStatus::CHANGED;
6702       }
6703 
6704       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6705                         << "\n");
6706 
6707       auto Remark = [&](OptimizationRemark OR) {
6708         LibFunc IsAllocShared;
6709         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6710           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6711             return OR << "Moving globalized variable to the stack.";
6712         return OR << "Moving memory allocation from the heap to the stack.";
6713       };
6714       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6715         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6716       else
6717         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6718 
6719       const DataLayout &DL = A.getInfoCache().getDL();
6720       Value *Size;
6721       std::optional<APInt> SizeAPI = getSize(A, *this, AI);
6722       if (SizeAPI) {
6723         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6724       } else {
6725         LLVMContext &Ctx = AI.CB->getContext();
6726         ObjectSizeOpts Opts;
6727         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6728         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6729         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6730                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6731         Size = SizeOffsetPair.first;
6732       }
6733 
6734       Instruction *IP =
6735           AI.MoveAllocaIntoEntry ? &F->getEntryBlock().front() : AI.CB;
6736 
6737       Align Alignment(1);
6738       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6739         Alignment = std::max(Alignment, *RetAlign);
6740       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6741         std::optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6742         assert(AlignmentAPI && AlignmentAPI->getZExtValue() > 0 &&
6743                "Expected an alignment during manifest!");
6744         Alignment =
6745             std::max(Alignment, assumeAligned(AlignmentAPI->getZExtValue()));
6746       }
6747 
6748       // TODO: Hoist the alloca towards the function entry.
6749       unsigned AS = DL.getAllocaAddrSpace();
6750       Instruction *Alloca =
6751           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
6752                          AI.CB->getName() + ".h2s", IP);
6753 
6754       if (Alloca->getType() != AI.CB->getType())
6755         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6756             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6757 
6758       auto *I8Ty = Type::getInt8Ty(F->getContext());
6759       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6760       assert(InitVal &&
6761              "Must be able to materialize initial memory state of allocation");
6762 
6763       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6764 
6765       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6766         auto *NBB = II->getNormalDest();
6767         BranchInst::Create(NBB, AI.CB->getParent());
6768         A.deleteAfterManifest(*AI.CB);
6769       } else {
6770         A.deleteAfterManifest(*AI.CB);
6771       }
6772 
6773       // Initialize the alloca with the same value as used by the allocation
6774       // function.  We can skip undef as the initial value of an alloc is
6775       // undef, and the memset would simply end up being DSEd.
6776       if (!isa<UndefValue>(InitVal)) {
6777         IRBuilder<> Builder(Alloca->getNextNode());
6778         // TODO: Use alignment above if align!=1
6779         Builder.CreateMemSet(Alloca, InitVal, Size, std::nullopt);
6780       }
6781       HasChanged = ChangeStatus::CHANGED;
6782     }
6783 
6784     return HasChanged;
6785   }
6786 
6787   std::optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6788                                 Value &V) {
6789     bool UsedAssumedInformation = false;
6790     std::optional<Constant *> SimpleV =
6791         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6792     if (!SimpleV)
6793       return APInt(64, 0);
6794     if (auto *CI = dyn_cast_or_null<ConstantInt>(*SimpleV))
6795       return CI->getValue();
6796     return std::nullopt;
6797   }
6798 
6799   std::optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6800                                AllocationInfo &AI) {
6801     auto Mapper = [&](const Value *V) -> const Value * {
6802       bool UsedAssumedInformation = false;
6803       if (std::optional<Constant *> SimpleV =
6804               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6805         if (*SimpleV)
6806           return *SimpleV;
6807       return V;
6808     };
6809 
6810     const Function *F = getAnchorScope();
6811     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6812     return getAllocSize(AI.CB, TLI, Mapper);
6813   }
6814 
6815   /// Collection of all malloc-like calls in a function with associated
6816   /// information.
6817   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6818 
6819   /// Collection of all free-like calls in a function with associated
6820   /// information.
6821   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6822 
6823   ChangeStatus updateImpl(Attributor &A) override;
6824 };
6825 
6826 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6827   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6828   const Function *F = getAnchorScope();
6829   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6830 
6831   const auto *LivenessAA =
6832       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6833 
6834   MustBeExecutedContextExplorer *Explorer =
6835       A.getInfoCache().getMustBeExecutedContextExplorer();
6836 
6837   bool StackIsAccessibleByOtherThreads =
6838       A.getInfoCache().stackIsAccessibleByOtherThreads();
6839 
6840   LoopInfo *LI =
6841       A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F);
6842   std::optional<bool> MayContainIrreducibleControl;
6843   auto IsInLoop = [&](BasicBlock &BB) {
6844     if (&F->getEntryBlock() == &BB)
6845       return false;
6846     if (!MayContainIrreducibleControl.has_value())
6847       MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI);
6848     if (*MayContainIrreducibleControl)
6849       return true;
6850     if (!LI)
6851       return true;
6852     return LI->getLoopFor(&BB) != nullptr;
6853   };
6854 
6855   // Flag to ensure we update our deallocation information at most once per
6856   // updateImpl call and only if we use the free check reasoning.
6857   bool HasUpdatedFrees = false;
6858 
6859   auto UpdateFrees = [&]() {
6860     HasUpdatedFrees = true;
6861 
6862     for (auto &It : DeallocationInfos) {
6863       DeallocationInfo &DI = *It.second;
6864       // For now we cannot use deallocations that have unknown inputs, skip
6865       // them.
6866       if (DI.MightFreeUnknownObjects)
6867         continue;
6868 
6869       // No need to analyze dead calls, ignore them instead.
6870       bool UsedAssumedInformation = false;
6871       if (A.isAssumedDead(*DI.CB, this, LivenessAA, UsedAssumedInformation,
6872                           /* CheckBBLivenessOnly */ true))
6873         continue;
6874 
6875       // Use the non-optimistic version to get the freed object.
6876       Value *Obj = getUnderlyingObject(DI.FreedOp);
6877       if (!Obj) {
6878         LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n");
6879         DI.MightFreeUnknownObjects = true;
6880         continue;
6881       }
6882 
6883       // Free of null and undef can be ignored as no-ops (or UB in the latter
6884       // case).
6885       if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6886         continue;
6887 
6888       CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6889       if (!ObjCB) {
6890         LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj
6891                           << "\n");
6892         DI.MightFreeUnknownObjects = true;
6893         continue;
6894       }
6895 
6896       AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6897       if (!AI) {
6898         LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6899                           << "\n");
6900         DI.MightFreeUnknownObjects = true;
6901         continue;
6902       }
6903 
6904       DI.PotentialAllocationCalls.insert(ObjCB);
6905     }
6906   };
6907 
6908   auto FreeCheck = [&](AllocationInfo &AI) {
6909     // If the stack is not accessible by other threads, the "must-free" logic
6910     // doesn't apply as the pointer could be shared and needs to be places in
6911     // "shareable" memory.
6912     if (!StackIsAccessibleByOtherThreads) {
6913       bool IsKnownNoSycn;
6914       if (!AA::hasAssumedIRAttr<Attribute::NoSync>(
6915               A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNoSycn)) {
6916         LLVM_DEBUG(
6917             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6918                       "other threads and function is not nosync:\n");
6919         return false;
6920       }
6921     }
6922     if (!HasUpdatedFrees)
6923       UpdateFrees();
6924 
6925     // TODO: Allow multi exit functions that have different free calls.
6926     if (AI.PotentialFreeCalls.size() != 1) {
6927       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6928                         << AI.PotentialFreeCalls.size() << "\n");
6929       return false;
6930     }
6931     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6932     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6933     if (!DI) {
6934       LLVM_DEBUG(
6935           dbgs() << "[H2S] unique free call was not known as deallocation call "
6936                  << *UniqueFree << "\n");
6937       return false;
6938     }
6939     if (DI->MightFreeUnknownObjects) {
6940       LLVM_DEBUG(
6941           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6942       return false;
6943     }
6944     if (DI->PotentialAllocationCalls.empty())
6945       return true;
6946     if (DI->PotentialAllocationCalls.size() > 1) {
6947       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6948                         << DI->PotentialAllocationCalls.size()
6949                         << " different allocations\n");
6950       return false;
6951     }
6952     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6953       LLVM_DEBUG(
6954           dbgs()
6955           << "[H2S] unique free call not known to free this allocation but "
6956           << **DI->PotentialAllocationCalls.begin() << "\n");
6957       return false;
6958     }
6959 
6960     // __kmpc_alloc_shared and __kmpc_alloc_free are by construction matched.
6961     if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared) {
6962       Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6963       if (!Explorer || !Explorer->findInContextOf(UniqueFree, CtxI)) {
6964         LLVM_DEBUG(
6965             dbgs()
6966             << "[H2S] unique free call might not be executed with the allocation "
6967             << *UniqueFree << "\n");
6968         return false;
6969       }
6970     }
6971     return true;
6972   };
6973 
6974   auto UsesCheck = [&](AllocationInfo &AI) {
6975     bool ValidUsesOnly = true;
6976 
6977     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6978       Instruction *UserI = cast<Instruction>(U.getUser());
6979       if (isa<LoadInst>(UserI))
6980         return true;
6981       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6982         if (SI->getValueOperand() == U.get()) {
6983           LLVM_DEBUG(dbgs()
6984                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6985           ValidUsesOnly = false;
6986         } else {
6987           // A store into the malloc'ed memory is fine.
6988         }
6989         return true;
6990       }
6991       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6992         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6993           return true;
6994         if (DeallocationInfos.count(CB)) {
6995           AI.PotentialFreeCalls.insert(CB);
6996           return true;
6997         }
6998 
6999         unsigned ArgNo = CB->getArgOperandNo(&U);
7000         auto CBIRP = IRPosition::callsite_argument(*CB, ArgNo);
7001 
7002         bool IsKnownNoCapture;
7003         bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
7004             A, this, CBIRP, DepClassTy::OPTIONAL, IsKnownNoCapture);
7005 
7006         // If a call site argument use is nofree, we are fine.
7007         bool IsKnownNoFree;
7008         bool IsAssumedNoFree = AA::hasAssumedIRAttr<Attribute::NoFree>(
7009             A, this, CBIRP, DepClassTy::OPTIONAL, IsKnownNoFree);
7010 
7011         if (!IsAssumedNoCapture ||
7012             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
7013              !IsAssumedNoFree)) {
7014           AI.HasPotentiallyFreeingUnknownUses |= !IsAssumedNoFree;
7015 
7016           // Emit a missed remark if this is missed OpenMP globalization.
7017           auto Remark = [&](OptimizationRemarkMissed ORM) {
7018             return ORM
7019                    << "Could not move globalized variable to the stack. "
7020                       "Variable is potentially captured in call. Mark "
7021                       "parameter as `__attribute__((noescape))` to override.";
7022           };
7023 
7024           if (ValidUsesOnly &&
7025               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
7026             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
7027 
7028           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
7029           ValidUsesOnly = false;
7030         }
7031         return true;
7032       }
7033 
7034       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
7035           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
7036         Follow = true;
7037         return true;
7038       }
7039       // Unknown user for which we can not track uses further (in a way that
7040       // makes sense).
7041       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
7042       ValidUsesOnly = false;
7043       return true;
7044     };
7045     if (!A.checkForAllUses(Pred, *this, *AI.CB, /* CheckBBLivenessOnly */ false,
7046                            DepClassTy::OPTIONAL, /* IgnoreDroppableUses */ true,
7047                            [&](const Use &OldU, const Use &NewU) {
7048                              auto *SI = dyn_cast<StoreInst>(OldU.getUser());
7049                              return !SI || StackIsAccessibleByOtherThreads ||
7050                                     AA::isAssumedThreadLocalObject(
7051                                         A, *SI->getPointerOperand(), *this);
7052                            }))
7053       return false;
7054     return ValidUsesOnly;
7055   };
7056 
7057   // The actual update starts here. We look at all allocations and depending on
7058   // their status perform the appropriate check(s).
7059   for (auto &It : AllocationInfos) {
7060     AllocationInfo &AI = *It.second;
7061     if (AI.Status == AllocationInfo::INVALID)
7062       continue;
7063 
7064     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
7065       std::optional<APInt> APAlign = getAPInt(A, *this, *Align);
7066       if (!APAlign) {
7067         // Can't generate an alloca which respects the required alignment
7068         // on the allocation.
7069         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
7070                           << "\n");
7071         AI.Status = AllocationInfo::INVALID;
7072         Changed = ChangeStatus::CHANGED;
7073         continue;
7074       }
7075       if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
7076           !APAlign->isPowerOf2()) {
7077         LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
7078                           << "\n");
7079         AI.Status = AllocationInfo::INVALID;
7080         Changed = ChangeStatus::CHANGED;
7081         continue;
7082       }
7083     }
7084 
7085     std::optional<APInt> Size = getSize(A, *this, AI);
7086     if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
7087         MaxHeapToStackSize != -1) {
7088       if (!Size || Size->ugt(MaxHeapToStackSize)) {
7089         LLVM_DEBUG({
7090           if (!Size)
7091             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
7092           else
7093             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
7094                    << MaxHeapToStackSize << "\n";
7095         });
7096 
7097         AI.Status = AllocationInfo::INVALID;
7098         Changed = ChangeStatus::CHANGED;
7099         continue;
7100       }
7101     }
7102 
7103     switch (AI.Status) {
7104     case AllocationInfo::STACK_DUE_TO_USE:
7105       if (UsesCheck(AI))
7106         break;
7107       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
7108       [[fallthrough]];
7109     case AllocationInfo::STACK_DUE_TO_FREE:
7110       if (FreeCheck(AI))
7111         break;
7112       AI.Status = AllocationInfo::INVALID;
7113       Changed = ChangeStatus::CHANGED;
7114       break;
7115     case AllocationInfo::INVALID:
7116       llvm_unreachable("Invalid allocations should never reach this point!");
7117     };
7118 
7119     // Check if we still think we can move it into the entry block. If the
7120     // alloca comes from a converted __kmpc_alloc_shared then we can usually
7121     // ignore the potential compilations associated with loops.
7122     bool IsGlobalizedLocal =
7123         AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared;
7124     if (AI.MoveAllocaIntoEntry &&
7125         (!Size.has_value() ||
7126          (!IsGlobalizedLocal && IsInLoop(*AI.CB->getParent()))))
7127       AI.MoveAllocaIntoEntry = false;
7128   }
7129 
7130   return Changed;
7131 }
7132 } // namespace
7133 
7134 /// ----------------------- Privatizable Pointers ------------------------------
7135 namespace {
7136 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
7137   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
7138       : AAPrivatizablePtr(IRP, A), PrivatizableType(std::nullopt) {}
7139 
7140   ChangeStatus indicatePessimisticFixpoint() override {
7141     AAPrivatizablePtr::indicatePessimisticFixpoint();
7142     PrivatizableType = nullptr;
7143     return ChangeStatus::CHANGED;
7144   }
7145 
7146   /// Identify the type we can chose for a private copy of the underlying
7147   /// argument. std::nullopt means it is not clear yet, nullptr means there is
7148   /// none.
7149   virtual std::optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
7150 
7151   /// Return a privatizable type that encloses both T0 and T1.
7152   /// TODO: This is merely a stub for now as we should manage a mapping as well.
7153   std::optional<Type *> combineTypes(std::optional<Type *> T0,
7154                                      std::optional<Type *> T1) {
7155     if (!T0)
7156       return T1;
7157     if (!T1)
7158       return T0;
7159     if (T0 == T1)
7160       return T0;
7161     return nullptr;
7162   }
7163 
7164   std::optional<Type *> getPrivatizableType() const override {
7165     return PrivatizableType;
7166   }
7167 
7168   const std::string getAsStr(Attributor *A) const override {
7169     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
7170   }
7171 
7172 protected:
7173   std::optional<Type *> PrivatizableType;
7174 };
7175 
7176 // TODO: Do this for call site arguments (probably also other values) as well.
7177 
7178 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
7179   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
7180       : AAPrivatizablePtrImpl(IRP, A) {}
7181 
7182   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7183   std::optional<Type *> identifyPrivatizableType(Attributor &A) override {
7184     // If this is a byval argument and we know all the call sites (so we can
7185     // rewrite them), there is no need to check them explicitly.
7186     bool UsedAssumedInformation = false;
7187     SmallVector<Attribute, 1> Attrs;
7188     A.getAttrs(getIRPosition(), {Attribute::ByVal}, Attrs,
7189                /* IgnoreSubsumingPositions */ true);
7190     if (!Attrs.empty() &&
7191         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
7192                                true, UsedAssumedInformation))
7193       return Attrs[0].getValueAsType();
7194 
7195     std::optional<Type *> Ty;
7196     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
7197 
7198     // Make sure the associated call site argument has the same type at all call
7199     // sites and it is an allocation we know is safe to privatize, for now that
7200     // means we only allow alloca instructions.
7201     // TODO: We can additionally analyze the accesses in the callee to  create
7202     //       the type from that information instead. That is a little more
7203     //       involved and will be done in a follow up patch.
7204     auto CallSiteCheck = [&](AbstractCallSite ACS) {
7205       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
7206       // Check if a coresponding argument was found or if it is one not
7207       // associated (which can happen for callback calls).
7208       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
7209         return false;
7210 
7211       // Check that all call sites agree on a type.
7212       auto *PrivCSArgAA =
7213           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
7214       if (!PrivCSArgAA)
7215         return false;
7216       std::optional<Type *> CSTy = PrivCSArgAA->getPrivatizableType();
7217 
7218       LLVM_DEBUG({
7219         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
7220         if (CSTy && *CSTy)
7221           (*CSTy)->print(dbgs());
7222         else if (CSTy)
7223           dbgs() << "<nullptr>";
7224         else
7225           dbgs() << "<none>";
7226       });
7227 
7228       Ty = combineTypes(Ty, CSTy);
7229 
7230       LLVM_DEBUG({
7231         dbgs() << " : New Type: ";
7232         if (Ty && *Ty)
7233           (*Ty)->print(dbgs());
7234         else if (Ty)
7235           dbgs() << "<nullptr>";
7236         else
7237           dbgs() << "<none>";
7238         dbgs() << "\n";
7239       });
7240 
7241       return !Ty || *Ty;
7242     };
7243 
7244     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
7245                                 UsedAssumedInformation))
7246       return nullptr;
7247     return Ty;
7248   }
7249 
7250   /// See AbstractAttribute::updateImpl(...).
7251   ChangeStatus updateImpl(Attributor &A) override {
7252     PrivatizableType = identifyPrivatizableType(A);
7253     if (!PrivatizableType)
7254       return ChangeStatus::UNCHANGED;
7255     if (!*PrivatizableType)
7256       return indicatePessimisticFixpoint();
7257 
7258     // The dependence is optional so we don't give up once we give up on the
7259     // alignment.
7260     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
7261                         DepClassTy::OPTIONAL);
7262 
7263     // Avoid arguments with padding for now.
7264     if (!A.hasAttr(getIRPosition(), Attribute::ByVal) &&
7265         !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
7266       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
7267       return indicatePessimisticFixpoint();
7268     }
7269 
7270     // Collect the types that will replace the privatizable type in the function
7271     // signature.
7272     SmallVector<Type *, 16> ReplacementTypes;
7273     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
7274 
7275     // Verify callee and caller agree on how the promoted argument would be
7276     // passed.
7277     Function &Fn = *getIRPosition().getAnchorScope();
7278     const auto *TTI =
7279         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
7280     if (!TTI) {
7281       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
7282                         << Fn.getName() << "\n");
7283       return indicatePessimisticFixpoint();
7284     }
7285 
7286     auto CallSiteCheck = [&](AbstractCallSite ACS) {
7287       CallBase *CB = ACS.getInstruction();
7288       return TTI->areTypesABICompatible(
7289           CB->getCaller(),
7290           dyn_cast_if_present<Function>(CB->getCalledOperand()),
7291           ReplacementTypes);
7292     };
7293     bool UsedAssumedInformation = false;
7294     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
7295                                 UsedAssumedInformation)) {
7296       LLVM_DEBUG(
7297           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
7298                  << Fn.getName() << "\n");
7299       return indicatePessimisticFixpoint();
7300     }
7301 
7302     // Register a rewrite of the argument.
7303     Argument *Arg = getAssociatedArgument();
7304     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
7305       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
7306       return indicatePessimisticFixpoint();
7307     }
7308 
7309     unsigned ArgNo = Arg->getArgNo();
7310 
7311     // Helper to check if for the given call site the associated argument is
7312     // passed to a callback where the privatization would be different.
7313     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
7314       SmallVector<const Use *, 4> CallbackUses;
7315       AbstractCallSite::getCallbackUses(CB, CallbackUses);
7316       for (const Use *U : CallbackUses) {
7317         AbstractCallSite CBACS(U);
7318         assert(CBACS && CBACS.isCallbackCall());
7319         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
7320           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
7321 
7322           LLVM_DEBUG({
7323             dbgs()
7324                 << "[AAPrivatizablePtr] Argument " << *Arg
7325                 << "check if can be privatized in the context of its parent ("
7326                 << Arg->getParent()->getName()
7327                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
7328                    "callback ("
7329                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
7330                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
7331                 << CBACS.getCallArgOperand(CBArg) << " vs "
7332                 << CB.getArgOperand(ArgNo) << "\n"
7333                 << "[AAPrivatizablePtr] " << CBArg << " : "
7334                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
7335           });
7336 
7337           if (CBArgNo != int(ArgNo))
7338             continue;
7339           const auto *CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
7340               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
7341           if (CBArgPrivAA && CBArgPrivAA->isValidState()) {
7342             auto CBArgPrivTy = CBArgPrivAA->getPrivatizableType();
7343             if (!CBArgPrivTy)
7344               continue;
7345             if (*CBArgPrivTy == PrivatizableType)
7346               continue;
7347           }
7348 
7349           LLVM_DEBUG({
7350             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
7351                    << " cannot be privatized in the context of its parent ("
7352                    << Arg->getParent()->getName()
7353                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
7354                       "callback ("
7355                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
7356                    << ").\n[AAPrivatizablePtr] for which the argument "
7357                       "privatization is not compatible.\n";
7358           });
7359           return false;
7360         }
7361       }
7362       return true;
7363     };
7364 
7365     // Helper to check if for the given call site the associated argument is
7366     // passed to a direct call where the privatization would be different.
7367     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
7368       CallBase *DC = cast<CallBase>(ACS.getInstruction());
7369       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
7370       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
7371              "Expected a direct call operand for callback call operand");
7372 
7373       Function *DCCallee =
7374           dyn_cast_if_present<Function>(DC->getCalledOperand());
7375       LLVM_DEBUG({
7376         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
7377                << " check if be privatized in the context of its parent ("
7378                << Arg->getParent()->getName()
7379                << ")\n[AAPrivatizablePtr] because it is an argument in a "
7380                   "direct call of ("
7381                << DCArgNo << "@" << DCCallee->getName() << ").\n";
7382       });
7383 
7384       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
7385         const auto *DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
7386             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
7387             DepClassTy::REQUIRED);
7388         if (DCArgPrivAA && DCArgPrivAA->isValidState()) {
7389           auto DCArgPrivTy = DCArgPrivAA->getPrivatizableType();
7390           if (!DCArgPrivTy)
7391             return true;
7392           if (*DCArgPrivTy == PrivatizableType)
7393             return true;
7394         }
7395       }
7396 
7397       LLVM_DEBUG({
7398         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
7399                << " cannot be privatized in the context of its parent ("
7400                << Arg->getParent()->getName()
7401                << ")\n[AAPrivatizablePtr] because it is an argument in a "
7402                   "direct call of ("
7403                << ACS.getInstruction()->getCalledOperand()->getName()
7404                << ").\n[AAPrivatizablePtr] for which the argument "
7405                   "privatization is not compatible.\n";
7406       });
7407       return false;
7408     };
7409 
7410     // Helper to check if the associated argument is used at the given abstract
7411     // call site in a way that is incompatible with the privatization assumed
7412     // here.
7413     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
7414       if (ACS.isDirectCall())
7415         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
7416       if (ACS.isCallbackCall())
7417         return IsCompatiblePrivArgOfDirectCS(ACS);
7418       return false;
7419     };
7420 
7421     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
7422                                 UsedAssumedInformation))
7423       return indicatePessimisticFixpoint();
7424 
7425     return ChangeStatus::UNCHANGED;
7426   }
7427 
7428   /// Given a type to private \p PrivType, collect the constituates (which are
7429   /// used) in \p ReplacementTypes.
7430   static void
7431   identifyReplacementTypes(Type *PrivType,
7432                            SmallVectorImpl<Type *> &ReplacementTypes) {
7433     // TODO: For now we expand the privatization type to the fullest which can
7434     //       lead to dead arguments that need to be removed later.
7435     assert(PrivType && "Expected privatizable type!");
7436 
7437     // Traverse the type, extract constituate types on the outermost level.
7438     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7439       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
7440         ReplacementTypes.push_back(PrivStructType->getElementType(u));
7441     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7442       ReplacementTypes.append(PrivArrayType->getNumElements(),
7443                               PrivArrayType->getElementType());
7444     } else {
7445       ReplacementTypes.push_back(PrivType);
7446     }
7447   }
7448 
7449   /// Initialize \p Base according to the type \p PrivType at position \p IP.
7450   /// The values needed are taken from the arguments of \p F starting at
7451   /// position \p ArgNo.
7452   static void createInitialization(Type *PrivType, Value &Base, Function &F,
7453                                    unsigned ArgNo, Instruction &IP) {
7454     assert(PrivType && "Expected privatizable type!");
7455 
7456     IRBuilder<NoFolder> IRB(&IP);
7457     const DataLayout &DL = F.getParent()->getDataLayout();
7458 
7459     // Traverse the type, build GEPs and stores.
7460     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7461       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
7462       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
7463         Value *Ptr =
7464             constructPointer(&Base, PrivStructLayout->getElementOffset(u), IRB);
7465         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
7466       }
7467     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7468       Type *PointeeTy = PrivArrayType->getElementType();
7469       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
7470       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
7471         Value *Ptr = constructPointer(&Base, u * PointeeTySize, IRB);
7472         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
7473       }
7474     } else {
7475       new StoreInst(F.getArg(ArgNo), &Base, &IP);
7476     }
7477   }
7478 
7479   /// Extract values from \p Base according to the type \p PrivType at the
7480   /// call position \p ACS. The values are appended to \p ReplacementValues.
7481   void createReplacementValues(Align Alignment, Type *PrivType,
7482                                AbstractCallSite ACS, Value *Base,
7483                                SmallVectorImpl<Value *> &ReplacementValues) {
7484     assert(Base && "Expected base value!");
7485     assert(PrivType && "Expected privatizable type!");
7486     Instruction *IP = ACS.getInstruction();
7487 
7488     IRBuilder<NoFolder> IRB(IP);
7489     const DataLayout &DL = IP->getModule()->getDataLayout();
7490 
7491     // Traverse the type, build GEPs and loads.
7492     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7493       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
7494       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
7495         Type *PointeeTy = PrivStructType->getElementType(u);
7496         Value *Ptr =
7497             constructPointer(Base, PrivStructLayout->getElementOffset(u), IRB);
7498         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
7499         L->setAlignment(Alignment);
7500         ReplacementValues.push_back(L);
7501       }
7502     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7503       Type *PointeeTy = PrivArrayType->getElementType();
7504       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
7505       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
7506         Value *Ptr = constructPointer(Base, u * PointeeTySize, IRB);
7507         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
7508         L->setAlignment(Alignment);
7509         ReplacementValues.push_back(L);
7510       }
7511     } else {
7512       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
7513       L->setAlignment(Alignment);
7514       ReplacementValues.push_back(L);
7515     }
7516   }
7517 
7518   /// See AbstractAttribute::manifest(...)
7519   ChangeStatus manifest(Attributor &A) override {
7520     if (!PrivatizableType)
7521       return ChangeStatus::UNCHANGED;
7522     assert(*PrivatizableType && "Expected privatizable type!");
7523 
7524     // Collect all tail calls in the function as we cannot allow new allocas to
7525     // escape into tail recursion.
7526     // TODO: Be smarter about new allocas escaping into tail calls.
7527     SmallVector<CallInst *, 16> TailCalls;
7528     bool UsedAssumedInformation = false;
7529     if (!A.checkForAllInstructions(
7530             [&](Instruction &I) {
7531               CallInst &CI = cast<CallInst>(I);
7532               if (CI.isTailCall())
7533                 TailCalls.push_back(&CI);
7534               return true;
7535             },
7536             *this, {Instruction::Call}, UsedAssumedInformation))
7537       return ChangeStatus::UNCHANGED;
7538 
7539     Argument *Arg = getAssociatedArgument();
7540     // Query AAAlign attribute for alignment of associated argument to
7541     // determine the best alignment of loads.
7542     const auto *AlignAA =
7543         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7544 
7545     // Callback to repair the associated function. A new alloca is placed at the
7546     // beginning and initialized with the values passed through arguments. The
7547     // new alloca replaces the use of the old pointer argument.
7548     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7549         [=](const Attributor::ArgumentReplacementInfo &ARI,
7550             Function &ReplacementFn, Function::arg_iterator ArgIt) {
7551           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7552           Instruction *IP = &*EntryBB.getFirstInsertionPt();
7553           const DataLayout &DL = IP->getModule()->getDataLayout();
7554           unsigned AS = DL.getAllocaAddrSpace();
7555           Instruction *AI = new AllocaInst(*PrivatizableType, AS,
7556                                            Arg->getName() + ".priv", IP);
7557           createInitialization(*PrivatizableType, *AI, ReplacementFn,
7558                                ArgIt->getArgNo(), *IP);
7559 
7560           if (AI->getType() != Arg->getType())
7561             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7562                 AI, Arg->getType(), "", IP);
7563           Arg->replaceAllUsesWith(AI);
7564 
7565           for (CallInst *CI : TailCalls)
7566             CI->setTailCall(false);
7567         };
7568 
7569     // Callback to repair a call site of the associated function. The elements
7570     // of the privatizable type are loaded prior to the call and passed to the
7571     // new function version.
7572     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7573         [=](const Attributor::ArgumentReplacementInfo &ARI,
7574             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
7575           // When no alignment is specified for the load instruction,
7576           // natural alignment is assumed.
7577           createReplacementValues(
7578               AlignAA ? AlignAA->getAssumedAlign() : Align(0),
7579               *PrivatizableType, ACS,
7580               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7581               NewArgOperands);
7582         };
7583 
7584     // Collect the types that will replace the privatizable type in the function
7585     // signature.
7586     SmallVector<Type *, 16> ReplacementTypes;
7587     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
7588 
7589     // Register a rewrite of the argument.
7590     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7591                                            std::move(FnRepairCB),
7592                                            std::move(ACSRepairCB)))
7593       return ChangeStatus::CHANGED;
7594     return ChangeStatus::UNCHANGED;
7595   }
7596 
7597   /// See AbstractAttribute::trackStatistics()
7598   void trackStatistics() const override {
7599     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7600   }
7601 };
7602 
7603 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7604   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7605       : AAPrivatizablePtrImpl(IRP, A) {}
7606 
7607   /// See AbstractAttribute::initialize(...).
7608   void initialize(Attributor &A) override {
7609     // TODO: We can privatize more than arguments.
7610     indicatePessimisticFixpoint();
7611   }
7612 
7613   ChangeStatus updateImpl(Attributor &A) override {
7614     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7615                      "updateImpl will not be called");
7616   }
7617 
7618   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7619   std::optional<Type *> identifyPrivatizableType(Attributor &A) override {
7620     Value *Obj = getUnderlyingObject(&getAssociatedValue());
7621     if (!Obj) {
7622       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7623       return nullptr;
7624     }
7625 
7626     if (auto *AI = dyn_cast<AllocaInst>(Obj))
7627       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7628         if (CI->isOne())
7629           return AI->getAllocatedType();
7630     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7631       auto *PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7632           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7633       if (PrivArgAA && PrivArgAA->isAssumedPrivatizablePtr())
7634         return PrivArgAA->getPrivatizableType();
7635     }
7636 
7637     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7638                          "alloca nor privatizable argument: "
7639                       << *Obj << "!\n");
7640     return nullptr;
7641   }
7642 
7643   /// See AbstractAttribute::trackStatistics()
7644   void trackStatistics() const override {
7645     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7646   }
7647 };
7648 
7649 struct AAPrivatizablePtrCallSiteArgument final
7650     : public AAPrivatizablePtrFloating {
7651   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7652       : AAPrivatizablePtrFloating(IRP, A) {}
7653 
7654   /// See AbstractAttribute::initialize(...).
7655   void initialize(Attributor &A) override {
7656     if (A.hasAttr(getIRPosition(), Attribute::ByVal))
7657       indicateOptimisticFixpoint();
7658   }
7659 
7660   /// See AbstractAttribute::updateImpl(...).
7661   ChangeStatus updateImpl(Attributor &A) override {
7662     PrivatizableType = identifyPrivatizableType(A);
7663     if (!PrivatizableType)
7664       return ChangeStatus::UNCHANGED;
7665     if (!*PrivatizableType)
7666       return indicatePessimisticFixpoint();
7667 
7668     const IRPosition &IRP = getIRPosition();
7669     bool IsKnownNoCapture;
7670     bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
7671         A, this, IRP, DepClassTy::REQUIRED, IsKnownNoCapture);
7672     if (!IsAssumedNoCapture) {
7673       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7674       return indicatePessimisticFixpoint();
7675     }
7676 
7677     bool IsKnownNoAlias;
7678     if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
7679             A, this, IRP, DepClassTy::REQUIRED, IsKnownNoAlias)) {
7680       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7681       return indicatePessimisticFixpoint();
7682     }
7683 
7684     bool IsKnown;
7685     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7686       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7687       return indicatePessimisticFixpoint();
7688     }
7689 
7690     return ChangeStatus::UNCHANGED;
7691   }
7692 
7693   /// See AbstractAttribute::trackStatistics()
7694   void trackStatistics() const override {
7695     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7696   }
7697 };
7698 
7699 struct AAPrivatizablePtrCallSiteReturned final
7700     : public AAPrivatizablePtrFloating {
7701   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7702       : AAPrivatizablePtrFloating(IRP, A) {}
7703 
7704   /// See AbstractAttribute::initialize(...).
7705   void initialize(Attributor &A) override {
7706     // TODO: We can privatize more than arguments.
7707     indicatePessimisticFixpoint();
7708   }
7709 
7710   /// See AbstractAttribute::trackStatistics()
7711   void trackStatistics() const override {
7712     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7713   }
7714 };
7715 
7716 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7717   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7718       : AAPrivatizablePtrFloating(IRP, A) {}
7719 
7720   /// See AbstractAttribute::initialize(...).
7721   void initialize(Attributor &A) override {
7722     // TODO: We can privatize more than arguments.
7723     indicatePessimisticFixpoint();
7724   }
7725 
7726   /// See AbstractAttribute::trackStatistics()
7727   void trackStatistics() const override {
7728     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7729   }
7730 };
7731 } // namespace
7732 
7733 /// -------------------- Memory Behavior Attributes ----------------------------
7734 /// Includes read-none, read-only, and write-only.
7735 /// ----------------------------------------------------------------------------
7736 namespace {
7737 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7738   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7739       : AAMemoryBehavior(IRP, A) {}
7740 
7741   /// See AbstractAttribute::initialize(...).
7742   void initialize(Attributor &A) override {
7743     intersectAssumedBits(BEST_STATE);
7744     getKnownStateFromValue(A, getIRPosition(), getState());
7745     AAMemoryBehavior::initialize(A);
7746   }
7747 
7748   /// Return the memory behavior information encoded in the IR for \p IRP.
7749   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7750                                      BitIntegerState &State,
7751                                      bool IgnoreSubsumingPositions = false) {
7752     SmallVector<Attribute, 2> Attrs;
7753     A.getAttrs(IRP, AttrKinds, Attrs, IgnoreSubsumingPositions);
7754     for (const Attribute &Attr : Attrs) {
7755       switch (Attr.getKindAsEnum()) {
7756       case Attribute::ReadNone:
7757         State.addKnownBits(NO_ACCESSES);
7758         break;
7759       case Attribute::ReadOnly:
7760         State.addKnownBits(NO_WRITES);
7761         break;
7762       case Attribute::WriteOnly:
7763         State.addKnownBits(NO_READS);
7764         break;
7765       default:
7766         llvm_unreachable("Unexpected attribute!");
7767       }
7768     }
7769 
7770     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7771       if (!I->mayReadFromMemory())
7772         State.addKnownBits(NO_READS);
7773       if (!I->mayWriteToMemory())
7774         State.addKnownBits(NO_WRITES);
7775     }
7776   }
7777 
7778   /// See AbstractAttribute::getDeducedAttributes(...).
7779   void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
7780                             SmallVectorImpl<Attribute> &Attrs) const override {
7781     assert(Attrs.size() == 0);
7782     if (isAssumedReadNone())
7783       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7784     else if (isAssumedReadOnly())
7785       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7786     else if (isAssumedWriteOnly())
7787       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7788     assert(Attrs.size() <= 1);
7789   }
7790 
7791   /// See AbstractAttribute::manifest(...).
7792   ChangeStatus manifest(Attributor &A) override {
7793     const IRPosition &IRP = getIRPosition();
7794 
7795     if (A.hasAttr(IRP, Attribute::ReadNone,
7796                   /* IgnoreSubsumingPositions */ true))
7797       return ChangeStatus::UNCHANGED;
7798 
7799     // Check if we would improve the existing attributes first.
7800     SmallVector<Attribute, 4> DeducedAttrs;
7801     getDeducedAttributes(A, IRP.getAnchorValue().getContext(), DeducedAttrs);
7802     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7803           return A.hasAttr(IRP, Attr.getKindAsEnum(),
7804                            /* IgnoreSubsumingPositions */ true);
7805         }))
7806       return ChangeStatus::UNCHANGED;
7807 
7808     // Clear existing attributes.
7809     A.removeAttrs(IRP, AttrKinds);
7810     // Clear conflicting writable attribute.
7811     if (isAssumedReadOnly())
7812       A.removeAttrs(IRP, Attribute::Writable);
7813 
7814     // Use the generic manifest method.
7815     return IRAttribute::manifest(A);
7816   }
7817 
7818   /// See AbstractState::getAsStr().
7819   const std::string getAsStr(Attributor *A) const override {
7820     if (isAssumedReadNone())
7821       return "readnone";
7822     if (isAssumedReadOnly())
7823       return "readonly";
7824     if (isAssumedWriteOnly())
7825       return "writeonly";
7826     return "may-read/write";
7827   }
7828 
7829   /// The set of IR attributes AAMemoryBehavior deals with.
7830   static const Attribute::AttrKind AttrKinds[3];
7831 };
7832 
7833 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7834     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7835 
7836 /// Memory behavior attribute for a floating value.
7837 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7838   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7839       : AAMemoryBehaviorImpl(IRP, A) {}
7840 
7841   /// See AbstractAttribute::updateImpl(...).
7842   ChangeStatus updateImpl(Attributor &A) override;
7843 
7844   /// See AbstractAttribute::trackStatistics()
7845   void trackStatistics() const override {
7846     if (isAssumedReadNone())
7847       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7848     else if (isAssumedReadOnly())
7849       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7850     else if (isAssumedWriteOnly())
7851       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7852   }
7853 
7854 private:
7855   /// Return true if users of \p UserI might access the underlying
7856   /// variable/location described by \p U and should therefore be analyzed.
7857   bool followUsersOfUseIn(Attributor &A, const Use &U,
7858                           const Instruction *UserI);
7859 
7860   /// Update the state according to the effect of use \p U in \p UserI.
7861   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7862 };
7863 
7864 /// Memory behavior attribute for function argument.
7865 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7866   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7867       : AAMemoryBehaviorFloating(IRP, A) {}
7868 
7869   /// See AbstractAttribute::initialize(...).
7870   void initialize(Attributor &A) override {
7871     intersectAssumedBits(BEST_STATE);
7872     const IRPosition &IRP = getIRPosition();
7873     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7874     // can query it when we use has/getAttr. That would allow us to reuse the
7875     // initialize of the base class here.
7876     bool HasByVal = A.hasAttr(IRP, {Attribute::ByVal},
7877                               /* IgnoreSubsumingPositions */ true);
7878     getKnownStateFromValue(A, IRP, getState(),
7879                            /* IgnoreSubsumingPositions */ HasByVal);
7880   }
7881 
7882   ChangeStatus manifest(Attributor &A) override {
7883     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7884     if (!getAssociatedValue().getType()->isPointerTy())
7885       return ChangeStatus::UNCHANGED;
7886 
7887     // TODO: From readattrs.ll: "inalloca parameters are always
7888     //                           considered written"
7889     if (A.hasAttr(getIRPosition(),
7890                   {Attribute::InAlloca, Attribute::Preallocated})) {
7891       removeKnownBits(NO_WRITES);
7892       removeAssumedBits(NO_WRITES);
7893     }
7894     A.removeAttrs(getIRPosition(), AttrKinds);
7895     return AAMemoryBehaviorFloating::manifest(A);
7896   }
7897 
7898   /// See AbstractAttribute::trackStatistics()
7899   void trackStatistics() const override {
7900     if (isAssumedReadNone())
7901       STATS_DECLTRACK_ARG_ATTR(readnone)
7902     else if (isAssumedReadOnly())
7903       STATS_DECLTRACK_ARG_ATTR(readonly)
7904     else if (isAssumedWriteOnly())
7905       STATS_DECLTRACK_ARG_ATTR(writeonly)
7906   }
7907 };
7908 
7909 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7910   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7911       : AAMemoryBehaviorArgument(IRP, A) {}
7912 
7913   /// See AbstractAttribute::initialize(...).
7914   void initialize(Attributor &A) override {
7915     // If we don't have an associated attribute this is either a variadic call
7916     // or an indirect call, either way, nothing to do here.
7917     Argument *Arg = getAssociatedArgument();
7918     if (!Arg) {
7919       indicatePessimisticFixpoint();
7920       return;
7921     }
7922     if (Arg->hasByValAttr()) {
7923       addKnownBits(NO_WRITES);
7924       removeKnownBits(NO_READS);
7925       removeAssumedBits(NO_READS);
7926     }
7927     AAMemoryBehaviorArgument::initialize(A);
7928     if (getAssociatedFunction()->isDeclaration())
7929       indicatePessimisticFixpoint();
7930   }
7931 
7932   /// See AbstractAttribute::updateImpl(...).
7933   ChangeStatus updateImpl(Attributor &A) override {
7934     // TODO: Once we have call site specific value information we can provide
7935     //       call site specific liveness liveness information and then it makes
7936     //       sense to specialize attributes for call sites arguments instead of
7937     //       redirecting requests to the callee argument.
7938     Argument *Arg = getAssociatedArgument();
7939     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7940     auto *ArgAA =
7941         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7942     if (!ArgAA)
7943       return indicatePessimisticFixpoint();
7944     return clampStateAndIndicateChange(getState(), ArgAA->getState());
7945   }
7946 
7947   /// See AbstractAttribute::trackStatistics()
7948   void trackStatistics() const override {
7949     if (isAssumedReadNone())
7950       STATS_DECLTRACK_CSARG_ATTR(readnone)
7951     else if (isAssumedReadOnly())
7952       STATS_DECLTRACK_CSARG_ATTR(readonly)
7953     else if (isAssumedWriteOnly())
7954       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7955   }
7956 };
7957 
7958 /// Memory behavior attribute for a call site return position.
7959 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7960   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7961       : AAMemoryBehaviorFloating(IRP, A) {}
7962 
7963   /// See AbstractAttribute::initialize(...).
7964   void initialize(Attributor &A) override {
7965     AAMemoryBehaviorImpl::initialize(A);
7966   }
7967   /// See AbstractAttribute::manifest(...).
7968   ChangeStatus manifest(Attributor &A) override {
7969     // We do not annotate returned values.
7970     return ChangeStatus::UNCHANGED;
7971   }
7972 
7973   /// See AbstractAttribute::trackStatistics()
7974   void trackStatistics() const override {}
7975 };
7976 
7977 /// An AA to represent the memory behavior function attributes.
7978 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7979   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7980       : AAMemoryBehaviorImpl(IRP, A) {}
7981 
7982   /// See AbstractAttribute::updateImpl(Attributor &A).
7983   ChangeStatus updateImpl(Attributor &A) override;
7984 
7985   /// See AbstractAttribute::manifest(...).
7986   ChangeStatus manifest(Attributor &A) override {
7987     // TODO: It would be better to merge this with AAMemoryLocation, so that
7988     // we could determine read/write per location. This would also have the
7989     // benefit of only one place trying to manifest the memory attribute.
7990     Function &F = cast<Function>(getAnchorValue());
7991     MemoryEffects ME = MemoryEffects::unknown();
7992     if (isAssumedReadNone())
7993       ME = MemoryEffects::none();
7994     else if (isAssumedReadOnly())
7995       ME = MemoryEffects::readOnly();
7996     else if (isAssumedWriteOnly())
7997       ME = MemoryEffects::writeOnly();
7998 
7999     A.removeAttrs(getIRPosition(), AttrKinds);
8000     // Clear conflicting writable attribute.
8001     if (ME.onlyReadsMemory())
8002       for (Argument &Arg : F.args())
8003         A.removeAttrs(IRPosition::argument(Arg), Attribute::Writable);
8004     return A.manifestAttrs(getIRPosition(),
8005                            Attribute::getWithMemoryEffects(F.getContext(), ME));
8006   }
8007 
8008   /// See AbstractAttribute::trackStatistics()
8009   void trackStatistics() const override {
8010     if (isAssumedReadNone())
8011       STATS_DECLTRACK_FN_ATTR(readnone)
8012     else if (isAssumedReadOnly())
8013       STATS_DECLTRACK_FN_ATTR(readonly)
8014     else if (isAssumedWriteOnly())
8015       STATS_DECLTRACK_FN_ATTR(writeonly)
8016   }
8017 };
8018 
8019 /// AAMemoryBehavior attribute for call sites.
8020 struct AAMemoryBehaviorCallSite final
8021     : AACalleeToCallSite<AAMemoryBehavior, AAMemoryBehaviorImpl> {
8022   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
8023       : AACalleeToCallSite<AAMemoryBehavior, AAMemoryBehaviorImpl>(IRP, A) {}
8024 
8025   /// See AbstractAttribute::manifest(...).
8026   ChangeStatus manifest(Attributor &A) override {
8027     // TODO: Deduplicate this with AAMemoryBehaviorFunction.
8028     CallBase &CB = cast<CallBase>(getAnchorValue());
8029     MemoryEffects ME = MemoryEffects::unknown();
8030     if (isAssumedReadNone())
8031       ME = MemoryEffects::none();
8032     else if (isAssumedReadOnly())
8033       ME = MemoryEffects::readOnly();
8034     else if (isAssumedWriteOnly())
8035       ME = MemoryEffects::writeOnly();
8036 
8037     A.removeAttrs(getIRPosition(), AttrKinds);
8038     // Clear conflicting writable attribute.
8039     if (ME.onlyReadsMemory())
8040       for (Use &U : CB.args())
8041         A.removeAttrs(IRPosition::callsite_argument(CB, U.getOperandNo()),
8042                       Attribute::Writable);
8043     return A.manifestAttrs(
8044         getIRPosition(), Attribute::getWithMemoryEffects(CB.getContext(), ME));
8045   }
8046 
8047   /// See AbstractAttribute::trackStatistics()
8048   void trackStatistics() const override {
8049     if (isAssumedReadNone())
8050       STATS_DECLTRACK_CS_ATTR(readnone)
8051     else if (isAssumedReadOnly())
8052       STATS_DECLTRACK_CS_ATTR(readonly)
8053     else if (isAssumedWriteOnly())
8054       STATS_DECLTRACK_CS_ATTR(writeonly)
8055   }
8056 };
8057 
8058 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
8059 
8060   // The current assumed state used to determine a change.
8061   auto AssumedState = getAssumed();
8062 
8063   auto CheckRWInst = [&](Instruction &I) {
8064     // If the instruction has an own memory behavior state, use it to restrict
8065     // the local state. No further analysis is required as the other memory
8066     // state is as optimistic as it gets.
8067     if (const auto *CB = dyn_cast<CallBase>(&I)) {
8068       const auto *MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
8069           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
8070       if (MemBehaviorAA) {
8071         intersectAssumedBits(MemBehaviorAA->getAssumed());
8072         return !isAtFixpoint();
8073       }
8074     }
8075 
8076     // Remove access kind modifiers if necessary.
8077     if (I.mayReadFromMemory())
8078       removeAssumedBits(NO_READS);
8079     if (I.mayWriteToMemory())
8080       removeAssumedBits(NO_WRITES);
8081     return !isAtFixpoint();
8082   };
8083 
8084   bool UsedAssumedInformation = false;
8085   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8086                                           UsedAssumedInformation))
8087     return indicatePessimisticFixpoint();
8088 
8089   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
8090                                         : ChangeStatus::UNCHANGED;
8091 }
8092 
8093 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
8094 
8095   const IRPosition &IRP = getIRPosition();
8096   const IRPosition &FnPos = IRPosition::function_scope(IRP);
8097   AAMemoryBehavior::StateType &S = getState();
8098 
8099   // First, check the function scope. We take the known information and we avoid
8100   // work if the assumed information implies the current assumed information for
8101   // this attribute. This is a valid for all but byval arguments.
8102   Argument *Arg = IRP.getAssociatedArgument();
8103   AAMemoryBehavior::base_t FnMemAssumedState =
8104       AAMemoryBehavior::StateType::getWorstState();
8105   if (!Arg || !Arg->hasByValAttr()) {
8106     const auto *FnMemAA =
8107         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
8108     if (FnMemAA) {
8109       FnMemAssumedState = FnMemAA->getAssumed();
8110       S.addKnownBits(FnMemAA->getKnown());
8111       if ((S.getAssumed() & FnMemAA->getAssumed()) == S.getAssumed())
8112         return ChangeStatus::UNCHANGED;
8113     }
8114   }
8115 
8116   // The current assumed state used to determine a change.
8117   auto AssumedState = S.getAssumed();
8118 
8119   // Make sure the value is not captured (except through "return"), if
8120   // it is, any information derived would be irrelevant anyway as we cannot
8121   // check the potential aliases introduced by the capture. However, no need
8122   // to fall back to anythign less optimistic than the function state.
8123   bool IsKnownNoCapture;
8124   const AANoCapture *ArgNoCaptureAA = nullptr;
8125   bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
8126       A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture, false,
8127       &ArgNoCaptureAA);
8128 
8129   if (!IsAssumedNoCapture &&
8130       (!ArgNoCaptureAA || !ArgNoCaptureAA->isAssumedNoCaptureMaybeReturned())) {
8131     S.intersectAssumedBits(FnMemAssumedState);
8132     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
8133                                           : ChangeStatus::UNCHANGED;
8134   }
8135 
8136   // Visit and expand uses until all are analyzed or a fixpoint is reached.
8137   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
8138     Instruction *UserI = cast<Instruction>(U.getUser());
8139     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
8140                       << " \n");
8141 
8142     // Droppable users, e.g., llvm::assume does not actually perform any action.
8143     if (UserI->isDroppable())
8144       return true;
8145 
8146     // Check if the users of UserI should also be visited.
8147     Follow = followUsersOfUseIn(A, U, UserI);
8148 
8149     // If UserI might touch memory we analyze the use in detail.
8150     if (UserI->mayReadOrWriteMemory())
8151       analyzeUseIn(A, U, UserI);
8152 
8153     return !isAtFixpoint();
8154   };
8155 
8156   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
8157     return indicatePessimisticFixpoint();
8158 
8159   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
8160                                         : ChangeStatus::UNCHANGED;
8161 }
8162 
8163 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
8164                                                   const Instruction *UserI) {
8165   // The loaded value is unrelated to the pointer argument, no need to
8166   // follow the users of the load.
8167   if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI))
8168     return false;
8169 
8170   // By default we follow all uses assuming UserI might leak information on U,
8171   // we have special handling for call sites operands though.
8172   const auto *CB = dyn_cast<CallBase>(UserI);
8173   if (!CB || !CB->isArgOperand(&U))
8174     return true;
8175 
8176   // If the use is a call argument known not to be captured, the users of
8177   // the call do not need to be visited because they have to be unrelated to
8178   // the input. Note that this check is not trivial even though we disallow
8179   // general capturing of the underlying argument. The reason is that the
8180   // call might the argument "through return", which we allow and for which we
8181   // need to check call users.
8182   if (U.get()->getType()->isPointerTy()) {
8183     unsigned ArgNo = CB->getArgOperandNo(&U);
8184     bool IsKnownNoCapture;
8185     return !AA::hasAssumedIRAttr<Attribute::NoCapture>(
8186         A, this, IRPosition::callsite_argument(*CB, ArgNo),
8187         DepClassTy::OPTIONAL, IsKnownNoCapture);
8188   }
8189 
8190   return true;
8191 }
8192 
8193 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
8194                                             const Instruction *UserI) {
8195   assert(UserI->mayReadOrWriteMemory());
8196 
8197   switch (UserI->getOpcode()) {
8198   default:
8199     // TODO: Handle all atomics and other side-effect operations we know of.
8200     break;
8201   case Instruction::Load:
8202     // Loads cause the NO_READS property to disappear.
8203     removeAssumedBits(NO_READS);
8204     return;
8205 
8206   case Instruction::Store:
8207     // Stores cause the NO_WRITES property to disappear if the use is the
8208     // pointer operand. Note that while capturing was taken care of somewhere
8209     // else we need to deal with stores of the value that is not looked through.
8210     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
8211       removeAssumedBits(NO_WRITES);
8212     else
8213       indicatePessimisticFixpoint();
8214     return;
8215 
8216   case Instruction::Call:
8217   case Instruction::CallBr:
8218   case Instruction::Invoke: {
8219     // For call sites we look at the argument memory behavior attribute (this
8220     // could be recursive!) in order to restrict our own state.
8221     const auto *CB = cast<CallBase>(UserI);
8222 
8223     // Give up on operand bundles.
8224     if (CB->isBundleOperand(&U)) {
8225       indicatePessimisticFixpoint();
8226       return;
8227     }
8228 
8229     // Calling a function does read the function pointer, maybe write it if the
8230     // function is self-modifying.
8231     if (CB->isCallee(&U)) {
8232       removeAssumedBits(NO_READS);
8233       break;
8234     }
8235 
8236     // Adjust the possible access behavior based on the information on the
8237     // argument.
8238     IRPosition Pos;
8239     if (U.get()->getType()->isPointerTy())
8240       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
8241     else
8242       Pos = IRPosition::callsite_function(*CB);
8243     const auto *MemBehaviorAA =
8244         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
8245     if (!MemBehaviorAA)
8246       break;
8247     // "assumed" has at most the same bits as the MemBehaviorAA assumed
8248     // and at least "known".
8249     intersectAssumedBits(MemBehaviorAA->getAssumed());
8250     return;
8251   }
8252   };
8253 
8254   // Generally, look at the "may-properties" and adjust the assumed state if we
8255   // did not trigger special handling before.
8256   if (UserI->mayReadFromMemory())
8257     removeAssumedBits(NO_READS);
8258   if (UserI->mayWriteToMemory())
8259     removeAssumedBits(NO_WRITES);
8260 }
8261 } // namespace
8262 
8263 /// -------------------- Memory Locations Attributes ---------------------------
8264 /// Includes read-none, argmemonly, inaccessiblememonly,
8265 /// inaccessiblememorargmemonly
8266 /// ----------------------------------------------------------------------------
8267 
8268 std::string AAMemoryLocation::getMemoryLocationsAsStr(
8269     AAMemoryLocation::MemoryLocationsKind MLK) {
8270   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
8271     return "all memory";
8272   if (MLK == AAMemoryLocation::NO_LOCATIONS)
8273     return "no memory";
8274   std::string S = "memory:";
8275   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
8276     S += "stack,";
8277   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
8278     S += "constant,";
8279   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
8280     S += "internal global,";
8281   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
8282     S += "external global,";
8283   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
8284     S += "argument,";
8285   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
8286     S += "inaccessible,";
8287   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
8288     S += "malloced,";
8289   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
8290     S += "unknown,";
8291   S.pop_back();
8292   return S;
8293 }
8294 
8295 namespace {
8296 struct AAMemoryLocationImpl : public AAMemoryLocation {
8297 
8298   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
8299       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
8300     AccessKind2Accesses.fill(nullptr);
8301   }
8302 
8303   ~AAMemoryLocationImpl() {
8304     // The AccessSets are allocated via a BumpPtrAllocator, we call
8305     // the destructor manually.
8306     for (AccessSet *AS : AccessKind2Accesses)
8307       if (AS)
8308         AS->~AccessSet();
8309   }
8310 
8311   /// See AbstractAttribute::initialize(...).
8312   void initialize(Attributor &A) override {
8313     intersectAssumedBits(BEST_STATE);
8314     getKnownStateFromValue(A, getIRPosition(), getState());
8315     AAMemoryLocation::initialize(A);
8316   }
8317 
8318   /// Return the memory behavior information encoded in the IR for \p IRP.
8319   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
8320                                      BitIntegerState &State,
8321                                      bool IgnoreSubsumingPositions = false) {
8322     // For internal functions we ignore `argmemonly` and
8323     // `inaccessiblememorargmemonly` as we might break it via interprocedural
8324     // constant propagation. It is unclear if this is the best way but it is
8325     // unlikely this will cause real performance problems. If we are deriving
8326     // attributes for the anchor function we even remove the attribute in
8327     // addition to ignoring it.
8328     // TODO: A better way to handle this would be to add ~NO_GLOBAL_MEM /
8329     // MemoryEffects::Other as a possible location.
8330     bool UseArgMemOnly = true;
8331     Function *AnchorFn = IRP.getAnchorScope();
8332     if (AnchorFn && A.isRunOn(*AnchorFn))
8333       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
8334 
8335     SmallVector<Attribute, 2> Attrs;
8336     A.getAttrs(IRP, {Attribute::Memory}, Attrs, IgnoreSubsumingPositions);
8337     for (const Attribute &Attr : Attrs) {
8338       // TODO: We can map MemoryEffects to Attributor locations more precisely.
8339       MemoryEffects ME = Attr.getMemoryEffects();
8340       if (ME.doesNotAccessMemory()) {
8341         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
8342         continue;
8343       }
8344       if (ME.onlyAccessesInaccessibleMem()) {
8345         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
8346         continue;
8347       }
8348       if (ME.onlyAccessesArgPointees()) {
8349         if (UseArgMemOnly)
8350           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
8351         else {
8352           // Remove location information, only keep read/write info.
8353           ME = MemoryEffects(ME.getModRef());
8354           A.manifestAttrs(IRP,
8355                           Attribute::getWithMemoryEffects(
8356                               IRP.getAnchorValue().getContext(), ME),
8357                           /*ForceReplace*/ true);
8358         }
8359         continue;
8360       }
8361       if (ME.onlyAccessesInaccessibleOrArgMem()) {
8362         if (UseArgMemOnly)
8363           State.addKnownBits(inverseLocation(
8364               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
8365         else {
8366           // Remove location information, only keep read/write info.
8367           ME = MemoryEffects(ME.getModRef());
8368           A.manifestAttrs(IRP,
8369                           Attribute::getWithMemoryEffects(
8370                               IRP.getAnchorValue().getContext(), ME),
8371                           /*ForceReplace*/ true);
8372         }
8373         continue;
8374       }
8375     }
8376   }
8377 
8378   /// See AbstractAttribute::getDeducedAttributes(...).
8379   void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
8380                             SmallVectorImpl<Attribute> &Attrs) const override {
8381     // TODO: We can map Attributor locations to MemoryEffects more precisely.
8382     assert(Attrs.size() == 0);
8383     if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
8384       if (isAssumedReadNone())
8385         Attrs.push_back(
8386             Attribute::getWithMemoryEffects(Ctx, MemoryEffects::none()));
8387       else if (isAssumedInaccessibleMemOnly())
8388         Attrs.push_back(Attribute::getWithMemoryEffects(
8389             Ctx, MemoryEffects::inaccessibleMemOnly()));
8390       else if (isAssumedArgMemOnly())
8391         Attrs.push_back(
8392             Attribute::getWithMemoryEffects(Ctx, MemoryEffects::argMemOnly()));
8393       else if (isAssumedInaccessibleOrArgMemOnly())
8394         Attrs.push_back(Attribute::getWithMemoryEffects(
8395             Ctx, MemoryEffects::inaccessibleOrArgMemOnly()));
8396     }
8397     assert(Attrs.size() <= 1);
8398   }
8399 
8400   /// See AbstractAttribute::manifest(...).
8401   ChangeStatus manifest(Attributor &A) override {
8402     // TODO: If AAMemoryLocation and AAMemoryBehavior are merged, we could
8403     // provide per-location modref information here.
8404     const IRPosition &IRP = getIRPosition();
8405 
8406     SmallVector<Attribute, 1> DeducedAttrs;
8407     getDeducedAttributes(A, IRP.getAnchorValue().getContext(), DeducedAttrs);
8408     if (DeducedAttrs.size() != 1)
8409       return ChangeStatus::UNCHANGED;
8410     MemoryEffects ME = DeducedAttrs[0].getMemoryEffects();
8411 
8412     return A.manifestAttrs(IRP, Attribute::getWithMemoryEffects(
8413                                     IRP.getAnchorValue().getContext(), ME));
8414   }
8415 
8416   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
8417   bool checkForAllAccessesToMemoryKind(
8418       function_ref<bool(const Instruction *, const Value *, AccessKind,
8419                         MemoryLocationsKind)>
8420           Pred,
8421       MemoryLocationsKind RequestedMLK) const override {
8422     if (!isValidState())
8423       return false;
8424 
8425     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
8426     if (AssumedMLK == NO_LOCATIONS)
8427       return true;
8428 
8429     unsigned Idx = 0;
8430     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
8431          CurMLK *= 2, ++Idx) {
8432       if (CurMLK & RequestedMLK)
8433         continue;
8434 
8435       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
8436         for (const AccessInfo &AI : *Accesses)
8437           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
8438             return false;
8439     }
8440 
8441     return true;
8442   }
8443 
8444   ChangeStatus indicatePessimisticFixpoint() override {
8445     // If we give up and indicate a pessimistic fixpoint this instruction will
8446     // become an access for all potential access kinds:
8447     // TODO: Add pointers for argmemonly and globals to improve the results of
8448     //       checkForAllAccessesToMemoryKind.
8449     bool Changed = false;
8450     MemoryLocationsKind KnownMLK = getKnown();
8451     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
8452     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
8453       if (!(CurMLK & KnownMLK))
8454         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
8455                                   getAccessKindFromInst(I));
8456     return AAMemoryLocation::indicatePessimisticFixpoint();
8457   }
8458 
8459 protected:
8460   /// Helper struct to tie together an instruction that has a read or write
8461   /// effect with the pointer it accesses (if any).
8462   struct AccessInfo {
8463 
8464     /// The instruction that caused the access.
8465     const Instruction *I;
8466 
8467     /// The base pointer that is accessed, or null if unknown.
8468     const Value *Ptr;
8469 
8470     /// The kind of access (read/write/read+write).
8471     AccessKind Kind;
8472 
8473     bool operator==(const AccessInfo &RHS) const {
8474       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
8475     }
8476     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
8477       if (LHS.I != RHS.I)
8478         return LHS.I < RHS.I;
8479       if (LHS.Ptr != RHS.Ptr)
8480         return LHS.Ptr < RHS.Ptr;
8481       if (LHS.Kind != RHS.Kind)
8482         return LHS.Kind < RHS.Kind;
8483       return false;
8484     }
8485   };
8486 
8487   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
8488   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
8489   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
8490   std::array<AccessSet *, llvm::CTLog2<VALID_STATE>()> AccessKind2Accesses;
8491 
8492   /// Categorize the pointer arguments of CB that might access memory in
8493   /// AccessedLoc and update the state and access map accordingly.
8494   void
8495   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
8496                                      AAMemoryLocation::StateType &AccessedLocs,
8497                                      bool &Changed);
8498 
8499   /// Return the kind(s) of location that may be accessed by \p V.
8500   AAMemoryLocation::MemoryLocationsKind
8501   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
8502 
8503   /// Return the access kind as determined by \p I.
8504   AccessKind getAccessKindFromInst(const Instruction *I) {
8505     AccessKind AK = READ_WRITE;
8506     if (I) {
8507       AK = I->mayReadFromMemory() ? READ : NONE;
8508       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
8509     }
8510     return AK;
8511   }
8512 
8513   /// Update the state \p State and the AccessKind2Accesses given that \p I is
8514   /// an access of kind \p AK to a \p MLK memory location with the access
8515   /// pointer \p Ptr.
8516   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
8517                                  MemoryLocationsKind MLK, const Instruction *I,
8518                                  const Value *Ptr, bool &Changed,
8519                                  AccessKind AK = READ_WRITE) {
8520 
8521     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
8522     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
8523     if (!Accesses)
8524       Accesses = new (Allocator) AccessSet();
8525     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
8526     if (MLK == NO_UNKOWN_MEM)
8527       MLK = NO_LOCATIONS;
8528     State.removeAssumedBits(MLK);
8529   }
8530 
8531   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
8532   /// arguments, and update the state and access map accordingly.
8533   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
8534                           AAMemoryLocation::StateType &State, bool &Changed,
8535                           unsigned AccessAS = 0);
8536 
8537   /// Used to allocate access sets.
8538   BumpPtrAllocator &Allocator;
8539 };
8540 
8541 void AAMemoryLocationImpl::categorizePtrValue(
8542     Attributor &A, const Instruction &I, const Value &Ptr,
8543     AAMemoryLocation::StateType &State, bool &Changed, unsigned AccessAS) {
8544   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
8545                     << Ptr << " ["
8546                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
8547 
8548   auto Pred = [&](Value &Obj) {
8549     unsigned ObjectAS = Obj.getType()->getPointerAddressSpace();
8550     // TODO: recognize the TBAA used for constant accesses.
8551     MemoryLocationsKind MLK = NO_LOCATIONS;
8552 
8553     // Filter accesses to constant (GPU) memory if we have an AS at the access
8554     // site or the object is known to actually have the associated AS.
8555     if ((AccessAS == (unsigned)AA::GPUAddressSpace::Constant ||
8556          (ObjectAS == (unsigned)AA::GPUAddressSpace::Constant &&
8557           isIdentifiedObject(&Obj))) &&
8558         AA::isGPU(*I.getModule()))
8559       return true;
8560 
8561     if (isa<UndefValue>(&Obj))
8562       return true;
8563     if (isa<Argument>(&Obj)) {
8564       // TODO: For now we do not treat byval arguments as local copies performed
8565       // on the call edge, though, we should. To make that happen we need to
8566       // teach various passes, e.g., DSE, about the copy effect of a byval. That
8567       // would also allow us to mark functions only accessing byval arguments as
8568       // readnone again, arguably their accesses have no effect outside of the
8569       // function, like accesses to allocas.
8570       MLK = NO_ARGUMENT_MEM;
8571     } else if (auto *GV = dyn_cast<GlobalValue>(&Obj)) {
8572       // Reading constant memory is not treated as a read "effect" by the
8573       // function attr pass so we won't neither. Constants defined by TBAA are
8574       // similar. (We know we do not write it because it is constant.)
8575       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8576         if (GVar->isConstant())
8577           return true;
8578 
8579       if (GV->hasLocalLinkage())
8580         MLK = NO_GLOBAL_INTERNAL_MEM;
8581       else
8582         MLK = NO_GLOBAL_EXTERNAL_MEM;
8583     } else if (isa<ConstantPointerNull>(&Obj) &&
8584                (!NullPointerIsDefined(getAssociatedFunction(), AccessAS) ||
8585                 !NullPointerIsDefined(getAssociatedFunction(), ObjectAS))) {
8586       return true;
8587     } else if (isa<AllocaInst>(&Obj)) {
8588       MLK = NO_LOCAL_MEM;
8589     } else if (const auto *CB = dyn_cast<CallBase>(&Obj)) {
8590       bool IsKnownNoAlias;
8591       if (AA::hasAssumedIRAttr<Attribute::NoAlias>(
8592               A, this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL,
8593               IsKnownNoAlias))
8594         MLK = NO_MALLOCED_MEM;
8595       else
8596         MLK = NO_UNKOWN_MEM;
8597     } else {
8598       MLK = NO_UNKOWN_MEM;
8599     }
8600 
8601     assert(MLK != NO_LOCATIONS && "No location specified!");
8602     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8603                       << Obj << " -> " << getMemoryLocationsAsStr(MLK) << "\n");
8604     updateStateAndAccessesMap(State, MLK, &I, &Obj, Changed,
8605                               getAccessKindFromInst(&I));
8606 
8607     return true;
8608   };
8609 
8610   const auto *AA = A.getAAFor<AAUnderlyingObjects>(
8611       *this, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
8612   if (!AA || !AA->forallUnderlyingObjects(Pred, AA::Intraprocedural)) {
8613     LLVM_DEBUG(
8614         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
8615     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
8616                               getAccessKindFromInst(&I));
8617     return;
8618   }
8619 
8620   LLVM_DEBUG(
8621       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8622              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8623 }
8624 
8625 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8626     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8627     bool &Changed) {
8628   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8629 
8630     // Skip non-pointer arguments.
8631     const Value *ArgOp = CB.getArgOperand(ArgNo);
8632     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8633       continue;
8634 
8635     // Skip readnone arguments.
8636     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8637     const auto *ArgOpMemLocationAA =
8638         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8639 
8640     if (ArgOpMemLocationAA && ArgOpMemLocationAA->isAssumedReadNone())
8641       continue;
8642 
8643     // Categorize potentially accessed pointer arguments as if there was an
8644     // access instruction with them as pointer.
8645     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8646   }
8647 }
8648 
8649 AAMemoryLocation::MemoryLocationsKind
8650 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8651                                                   bool &Changed) {
8652   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8653                     << I << "\n");
8654 
8655   AAMemoryLocation::StateType AccessedLocs;
8656   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8657 
8658   if (auto *CB = dyn_cast<CallBase>(&I)) {
8659 
8660     // First check if we assume any memory is access is visible.
8661     const auto *CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8662         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8663     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8664                       << " [" << CBMemLocationAA << "]\n");
8665     if (!CBMemLocationAA) {
8666       updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr,
8667                                 Changed, getAccessKindFromInst(&I));
8668       return NO_UNKOWN_MEM;
8669     }
8670 
8671     if (CBMemLocationAA->isAssumedReadNone())
8672       return NO_LOCATIONS;
8673 
8674     if (CBMemLocationAA->isAssumedInaccessibleMemOnly()) {
8675       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8676                                 Changed, getAccessKindFromInst(&I));
8677       return AccessedLocs.getAssumed();
8678     }
8679 
8680     uint32_t CBAssumedNotAccessedLocs =
8681         CBMemLocationAA->getAssumedNotAccessedLocation();
8682 
8683     // Set the argmemonly and global bit as we handle them separately below.
8684     uint32_t CBAssumedNotAccessedLocsNoArgMem =
8685         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8686 
8687     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8688       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8689         continue;
8690       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8691                                 getAccessKindFromInst(&I));
8692     }
8693 
8694     // Now handle global memory if it might be accessed. This is slightly tricky
8695     // as NO_GLOBAL_MEM has multiple bits set.
8696     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8697     if (HasGlobalAccesses) {
8698       auto AccessPred = [&](const Instruction *, const Value *Ptr,
8699                             AccessKind Kind, MemoryLocationsKind MLK) {
8700         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8701                                   getAccessKindFromInst(&I));
8702         return true;
8703       };
8704       if (!CBMemLocationAA->checkForAllAccessesToMemoryKind(
8705               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8706         return AccessedLocs.getWorstState();
8707     }
8708 
8709     LLVM_DEBUG(
8710         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8711                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8712 
8713     // Now handle argument memory if it might be accessed.
8714     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8715     if (HasArgAccesses)
8716       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8717 
8718     LLVM_DEBUG(
8719         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8720                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8721 
8722     return AccessedLocs.getAssumed();
8723   }
8724 
8725   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8726     LLVM_DEBUG(
8727         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8728                << I << " [" << *Ptr << "]\n");
8729     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed,
8730                        Ptr->getType()->getPointerAddressSpace());
8731     return AccessedLocs.getAssumed();
8732   }
8733 
8734   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8735                     << I << "\n");
8736   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8737                             getAccessKindFromInst(&I));
8738   return AccessedLocs.getAssumed();
8739 }
8740 
8741 /// An AA to represent the memory behavior function attributes.
8742 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8743   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8744       : AAMemoryLocationImpl(IRP, A) {}
8745 
8746   /// See AbstractAttribute::updateImpl(Attributor &A).
8747   ChangeStatus updateImpl(Attributor &A) override {
8748 
8749     const auto *MemBehaviorAA =
8750         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8751     if (MemBehaviorAA && MemBehaviorAA->isAssumedReadNone()) {
8752       if (MemBehaviorAA->isKnownReadNone())
8753         return indicateOptimisticFixpoint();
8754       assert(isAssumedReadNone() &&
8755              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8756       A.recordDependence(*MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8757       return ChangeStatus::UNCHANGED;
8758     }
8759 
8760     // The current assumed state used to determine a change.
8761     auto AssumedState = getAssumed();
8762     bool Changed = false;
8763 
8764     auto CheckRWInst = [&](Instruction &I) {
8765       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8766       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8767                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8768       removeAssumedBits(inverseLocation(MLK, false, false));
8769       // Stop once only the valid bit set in the *not assumed location*, thus
8770       // once we don't actually exclude any memory locations in the state.
8771       return getAssumedNotAccessedLocation() != VALID_STATE;
8772     };
8773 
8774     bool UsedAssumedInformation = false;
8775     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8776                                             UsedAssumedInformation))
8777       return indicatePessimisticFixpoint();
8778 
8779     Changed |= AssumedState != getAssumed();
8780     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8781   }
8782 
8783   /// See AbstractAttribute::trackStatistics()
8784   void trackStatistics() const override {
8785     if (isAssumedReadNone())
8786       STATS_DECLTRACK_FN_ATTR(readnone)
8787     else if (isAssumedArgMemOnly())
8788       STATS_DECLTRACK_FN_ATTR(argmemonly)
8789     else if (isAssumedInaccessibleMemOnly())
8790       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8791     else if (isAssumedInaccessibleOrArgMemOnly())
8792       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8793   }
8794 };
8795 
8796 /// AAMemoryLocation attribute for call sites.
8797 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8798   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8799       : AAMemoryLocationImpl(IRP, A) {}
8800 
8801   /// See AbstractAttribute::updateImpl(...).
8802   ChangeStatus updateImpl(Attributor &A) override {
8803     // TODO: Once we have call site specific value information we can provide
8804     //       call site specific liveness liveness information and then it makes
8805     //       sense to specialize attributes for call sites arguments instead of
8806     //       redirecting requests to the callee argument.
8807     Function *F = getAssociatedFunction();
8808     const IRPosition &FnPos = IRPosition::function(*F);
8809     auto *FnAA =
8810         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8811     if (!FnAA)
8812       return indicatePessimisticFixpoint();
8813     bool Changed = false;
8814     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8815                           AccessKind Kind, MemoryLocationsKind MLK) {
8816       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8817                                 getAccessKindFromInst(I));
8818       return true;
8819     };
8820     if (!FnAA->checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8821       return indicatePessimisticFixpoint();
8822     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8823   }
8824 
8825   /// See AbstractAttribute::trackStatistics()
8826   void trackStatistics() const override {
8827     if (isAssumedReadNone())
8828       STATS_DECLTRACK_CS_ATTR(readnone)
8829   }
8830 };
8831 } // namespace
8832 
8833 /// ------------------ denormal-fp-math Attribute -------------------------
8834 
8835 namespace {
8836 struct AADenormalFPMathImpl : public AADenormalFPMath {
8837   AADenormalFPMathImpl(const IRPosition &IRP, Attributor &A)
8838       : AADenormalFPMath(IRP, A) {}
8839 
8840   const std::string getAsStr(Attributor *A) const override {
8841     std::string Str("AADenormalFPMath[");
8842     raw_string_ostream OS(Str);
8843 
8844     DenormalState Known = getKnown();
8845     if (Known.Mode.isValid())
8846       OS << "denormal-fp-math=" << Known.Mode;
8847     else
8848       OS << "invalid";
8849 
8850     if (Known.ModeF32.isValid())
8851       OS << " denormal-fp-math-f32=" << Known.ModeF32;
8852     OS << ']';
8853     return OS.str();
8854   }
8855 };
8856 
8857 struct AADenormalFPMathFunction final : AADenormalFPMathImpl {
8858   AADenormalFPMathFunction(const IRPosition &IRP, Attributor &A)
8859       : AADenormalFPMathImpl(IRP, A) {}
8860 
8861   void initialize(Attributor &A) override {
8862     const Function *F = getAnchorScope();
8863     DenormalMode Mode = F->getDenormalModeRaw();
8864     DenormalMode ModeF32 = F->getDenormalModeF32Raw();
8865 
8866     // TODO: Handling this here prevents handling the case where a callee has a
8867     // fixed denormal-fp-math with dynamic denormal-fp-math-f32, but called from
8868     // a function with a fully fixed mode.
8869     if (ModeF32 == DenormalMode::getInvalid())
8870       ModeF32 = Mode;
8871     Known = DenormalState{Mode, ModeF32};
8872     if (isModeFixed())
8873       indicateFixpoint();
8874   }
8875 
8876   ChangeStatus updateImpl(Attributor &A) override {
8877     ChangeStatus Change = ChangeStatus::UNCHANGED;
8878 
8879     auto CheckCallSite = [=, &Change, &A](AbstractCallSite CS) {
8880       Function *Caller = CS.getInstruction()->getFunction();
8881       LLVM_DEBUG(dbgs() << "[AADenormalFPMath] Call " << Caller->getName()
8882                         << "->" << getAssociatedFunction()->getName() << '\n');
8883 
8884       const auto *CallerInfo = A.getAAFor<AADenormalFPMath>(
8885           *this, IRPosition::function(*Caller), DepClassTy::REQUIRED);
8886       if (!CallerInfo)
8887         return false;
8888 
8889       Change = Change | clampStateAndIndicateChange(this->getState(),
8890                                                     CallerInfo->getState());
8891       return true;
8892     };
8893 
8894     bool AllCallSitesKnown = true;
8895     if (!A.checkForAllCallSites(CheckCallSite, *this, true, AllCallSitesKnown))
8896       return indicatePessimisticFixpoint();
8897 
8898     if (Change == ChangeStatus::CHANGED && isModeFixed())
8899       indicateFixpoint();
8900     return Change;
8901   }
8902 
8903   ChangeStatus manifest(Attributor &A) override {
8904     LLVMContext &Ctx = getAssociatedFunction()->getContext();
8905 
8906     SmallVector<Attribute, 2> AttrToAdd;
8907     SmallVector<StringRef, 2> AttrToRemove;
8908     if (Known.Mode == DenormalMode::getDefault()) {
8909       AttrToRemove.push_back("denormal-fp-math");
8910     } else {
8911       AttrToAdd.push_back(
8912           Attribute::get(Ctx, "denormal-fp-math", Known.Mode.str()));
8913     }
8914 
8915     if (Known.ModeF32 != Known.Mode) {
8916       AttrToAdd.push_back(
8917           Attribute::get(Ctx, "denormal-fp-math-f32", Known.ModeF32.str()));
8918     } else {
8919       AttrToRemove.push_back("denormal-fp-math-f32");
8920     }
8921 
8922     auto &IRP = getIRPosition();
8923 
8924     // TODO: There should be a combined add and remove API.
8925     return A.removeAttrs(IRP, AttrToRemove) |
8926            A.manifestAttrs(IRP, AttrToAdd, /*ForceReplace=*/true);
8927   }
8928 
8929   void trackStatistics() const override {
8930     STATS_DECLTRACK_FN_ATTR(denormal_fp_math)
8931   }
8932 };
8933 } // namespace
8934 
8935 /// ------------------ Value Constant Range Attribute -------------------------
8936 
8937 namespace {
8938 struct AAValueConstantRangeImpl : AAValueConstantRange {
8939   using StateType = IntegerRangeState;
8940   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8941       : AAValueConstantRange(IRP, A) {}
8942 
8943   /// See AbstractAttribute::initialize(..).
8944   void initialize(Attributor &A) override {
8945     if (A.hasSimplificationCallback(getIRPosition())) {
8946       indicatePessimisticFixpoint();
8947       return;
8948     }
8949 
8950     // Intersect a range given by SCEV.
8951     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8952 
8953     // Intersect a range given by LVI.
8954     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8955   }
8956 
8957   /// See AbstractAttribute::getAsStr().
8958   const std::string getAsStr(Attributor *A) const override {
8959     std::string Str;
8960     llvm::raw_string_ostream OS(Str);
8961     OS << "range(" << getBitWidth() << ")<";
8962     getKnown().print(OS);
8963     OS << " / ";
8964     getAssumed().print(OS);
8965     OS << ">";
8966     return OS.str();
8967   }
8968 
8969   /// Helper function to get a SCEV expr for the associated value at program
8970   /// point \p I.
8971   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8972     if (!getAnchorScope())
8973       return nullptr;
8974 
8975     ScalarEvolution *SE =
8976         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8977             *getAnchorScope());
8978 
8979     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8980         *getAnchorScope());
8981 
8982     if (!SE || !LI)
8983       return nullptr;
8984 
8985     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8986     if (!I)
8987       return S;
8988 
8989     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8990   }
8991 
8992   /// Helper function to get a range from SCEV for the associated value at
8993   /// program point \p I.
8994   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8995                                          const Instruction *I = nullptr) const {
8996     if (!getAnchorScope())
8997       return getWorstState(getBitWidth());
8998 
8999     ScalarEvolution *SE =
9000         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
9001             *getAnchorScope());
9002 
9003     const SCEV *S = getSCEV(A, I);
9004     if (!SE || !S)
9005       return getWorstState(getBitWidth());
9006 
9007     return SE->getUnsignedRange(S);
9008   }
9009 
9010   /// Helper function to get a range from LVI for the associated value at
9011   /// program point \p I.
9012   ConstantRange
9013   getConstantRangeFromLVI(Attributor &A,
9014                           const Instruction *CtxI = nullptr) const {
9015     if (!getAnchorScope())
9016       return getWorstState(getBitWidth());
9017 
9018     LazyValueInfo *LVI =
9019         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
9020             *getAnchorScope());
9021 
9022     if (!LVI || !CtxI)
9023       return getWorstState(getBitWidth());
9024     return LVI->getConstantRange(&getAssociatedValue(),
9025                                  const_cast<Instruction *>(CtxI),
9026                                  /*UndefAllowed*/ false);
9027   }
9028 
9029   /// Return true if \p CtxI is valid for querying outside analyses.
9030   /// This basically makes sure we do not ask intra-procedural analysis
9031   /// about a context in the wrong function or a context that violates
9032   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
9033   /// if the original context of this AA is OK or should be considered invalid.
9034   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
9035                                                const Instruction *CtxI,
9036                                                bool AllowAACtxI) const {
9037     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
9038       return false;
9039 
9040     // Our context might be in a different function, neither intra-procedural
9041     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
9042     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
9043       return false;
9044 
9045     // If the context is not dominated by the value there are paths to the
9046     // context that do not define the value. This cannot be handled by
9047     // LazyValueInfo so we need to bail.
9048     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
9049       InformationCache &InfoCache = A.getInfoCache();
9050       const DominatorTree *DT =
9051           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
9052               *I->getFunction());
9053       return DT && DT->dominates(I, CtxI);
9054     }
9055 
9056     return true;
9057   }
9058 
9059   /// See AAValueConstantRange::getKnownConstantRange(..).
9060   ConstantRange
9061   getKnownConstantRange(Attributor &A,
9062                         const Instruction *CtxI = nullptr) const override {
9063     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
9064                                                  /* AllowAACtxI */ false))
9065       return getKnown();
9066 
9067     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
9068     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
9069     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
9070   }
9071 
9072   /// See AAValueConstantRange::getAssumedConstantRange(..).
9073   ConstantRange
9074   getAssumedConstantRange(Attributor &A,
9075                           const Instruction *CtxI = nullptr) const override {
9076     // TODO: Make SCEV use Attributor assumption.
9077     //       We may be able to bound a variable range via assumptions in
9078     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
9079     //       evolve to x^2 + x, then we can say that y is in [2, 12].
9080     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
9081                                                  /* AllowAACtxI */ false))
9082       return getAssumed();
9083 
9084     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
9085     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
9086     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
9087   }
9088 
9089   /// Helper function to create MDNode for range metadata.
9090   static MDNode *
9091   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
9092                             const ConstantRange &AssumedConstantRange) {
9093     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
9094                                   Ty, AssumedConstantRange.getLower())),
9095                               ConstantAsMetadata::get(ConstantInt::get(
9096                                   Ty, AssumedConstantRange.getUpper()))};
9097     return MDNode::get(Ctx, LowAndHigh);
9098   }
9099 
9100   /// Return true if \p Assumed is included in \p KnownRanges.
9101   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
9102 
9103     if (Assumed.isFullSet())
9104       return false;
9105 
9106     if (!KnownRanges)
9107       return true;
9108 
9109     // If multiple ranges are annotated in IR, we give up to annotate assumed
9110     // range for now.
9111 
9112     // TODO:  If there exists a known range which containts assumed range, we
9113     // can say assumed range is better.
9114     if (KnownRanges->getNumOperands() > 2)
9115       return false;
9116 
9117     ConstantInt *Lower =
9118         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
9119     ConstantInt *Upper =
9120         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
9121 
9122     ConstantRange Known(Lower->getValue(), Upper->getValue());
9123     return Known.contains(Assumed) && Known != Assumed;
9124   }
9125 
9126   /// Helper function to set range metadata.
9127   static bool
9128   setRangeMetadataIfisBetterRange(Instruction *I,
9129                                   const ConstantRange &AssumedConstantRange) {
9130     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
9131     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
9132       if (!AssumedConstantRange.isEmptySet()) {
9133         I->setMetadata(LLVMContext::MD_range,
9134                        getMDNodeForConstantRange(I->getType(), I->getContext(),
9135                                                  AssumedConstantRange));
9136         return true;
9137       }
9138     }
9139     return false;
9140   }
9141 
9142   /// See AbstractAttribute::manifest()
9143   ChangeStatus manifest(Attributor &A) override {
9144     ChangeStatus Changed = ChangeStatus::UNCHANGED;
9145     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
9146     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
9147 
9148     auto &V = getAssociatedValue();
9149     if (!AssumedConstantRange.isEmptySet() &&
9150         !AssumedConstantRange.isSingleElement()) {
9151       if (Instruction *I = dyn_cast<Instruction>(&V)) {
9152         assert(I == getCtxI() && "Should not annotate an instruction which is "
9153                                  "not the context instruction");
9154         if (isa<CallInst>(I) || isa<LoadInst>(I))
9155           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
9156             Changed = ChangeStatus::CHANGED;
9157       }
9158     }
9159 
9160     return Changed;
9161   }
9162 };
9163 
9164 struct AAValueConstantRangeArgument final
9165     : AAArgumentFromCallSiteArguments<
9166           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
9167           true /* BridgeCallBaseContext */> {
9168   using Base = AAArgumentFromCallSiteArguments<
9169       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
9170       true /* BridgeCallBaseContext */>;
9171   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
9172       : Base(IRP, A) {}
9173 
9174   /// See AbstractAttribute::trackStatistics()
9175   void trackStatistics() const override {
9176     STATS_DECLTRACK_ARG_ATTR(value_range)
9177   }
9178 };
9179 
9180 struct AAValueConstantRangeReturned
9181     : AAReturnedFromReturnedValues<AAValueConstantRange,
9182                                    AAValueConstantRangeImpl,
9183                                    AAValueConstantRangeImpl::StateType,
9184                                    /* PropogateCallBaseContext */ true> {
9185   using Base =
9186       AAReturnedFromReturnedValues<AAValueConstantRange,
9187                                    AAValueConstantRangeImpl,
9188                                    AAValueConstantRangeImpl::StateType,
9189                                    /* PropogateCallBaseContext */ true>;
9190   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
9191       : Base(IRP, A) {}
9192 
9193   /// See AbstractAttribute::initialize(...).
9194   void initialize(Attributor &A) override {
9195     if (!A.isFunctionIPOAmendable(*getAssociatedFunction()))
9196       indicatePessimisticFixpoint();
9197   }
9198 
9199   /// See AbstractAttribute::trackStatistics()
9200   void trackStatistics() const override {
9201     STATS_DECLTRACK_FNRET_ATTR(value_range)
9202   }
9203 };
9204 
9205 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
9206   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
9207       : AAValueConstantRangeImpl(IRP, A) {}
9208 
9209   /// See AbstractAttribute::initialize(...).
9210   void initialize(Attributor &A) override {
9211     AAValueConstantRangeImpl::initialize(A);
9212     if (isAtFixpoint())
9213       return;
9214 
9215     Value &V = getAssociatedValue();
9216 
9217     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9218       unionAssumed(ConstantRange(C->getValue()));
9219       indicateOptimisticFixpoint();
9220       return;
9221     }
9222 
9223     if (isa<UndefValue>(&V)) {
9224       // Collapse the undef state to 0.
9225       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
9226       indicateOptimisticFixpoint();
9227       return;
9228     }
9229 
9230     if (isa<CallBase>(&V))
9231       return;
9232 
9233     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
9234       return;
9235 
9236     // If it is a load instruction with range metadata, use it.
9237     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
9238       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
9239         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
9240         return;
9241       }
9242 
9243     // We can work with PHI and select instruction as we traverse their operands
9244     // during update.
9245     if (isa<SelectInst>(V) || isa<PHINode>(V))
9246       return;
9247 
9248     // Otherwise we give up.
9249     indicatePessimisticFixpoint();
9250 
9251     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
9252                       << getAssociatedValue() << "\n");
9253   }
9254 
9255   bool calculateBinaryOperator(
9256       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
9257       const Instruction *CtxI,
9258       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9259     Value *LHS = BinOp->getOperand(0);
9260     Value *RHS = BinOp->getOperand(1);
9261 
9262     // Simplify the operands first.
9263     bool UsedAssumedInformation = false;
9264     const auto &SimplifiedLHS = A.getAssumedSimplified(
9265         IRPosition::value(*LHS, getCallBaseContext()), *this,
9266         UsedAssumedInformation, AA::Interprocedural);
9267     if (!SimplifiedLHS.has_value())
9268       return true;
9269     if (!*SimplifiedLHS)
9270       return false;
9271     LHS = *SimplifiedLHS;
9272 
9273     const auto &SimplifiedRHS = A.getAssumedSimplified(
9274         IRPosition::value(*RHS, getCallBaseContext()), *this,
9275         UsedAssumedInformation, AA::Interprocedural);
9276     if (!SimplifiedRHS.has_value())
9277       return true;
9278     if (!*SimplifiedRHS)
9279       return false;
9280     RHS = *SimplifiedRHS;
9281 
9282     // TODO: Allow non integers as well.
9283     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9284       return false;
9285 
9286     auto *LHSAA = A.getAAFor<AAValueConstantRange>(
9287         *this, IRPosition::value(*LHS, getCallBaseContext()),
9288         DepClassTy::REQUIRED);
9289     if (!LHSAA)
9290       return false;
9291     QuerriedAAs.push_back(LHSAA);
9292     auto LHSAARange = LHSAA->getAssumedConstantRange(A, CtxI);
9293 
9294     auto *RHSAA = A.getAAFor<AAValueConstantRange>(
9295         *this, IRPosition::value(*RHS, getCallBaseContext()),
9296         DepClassTy::REQUIRED);
9297     if (!RHSAA)
9298       return false;
9299     QuerriedAAs.push_back(RHSAA);
9300     auto RHSAARange = RHSAA->getAssumedConstantRange(A, CtxI);
9301 
9302     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
9303 
9304     T.unionAssumed(AssumedRange);
9305 
9306     // TODO: Track a known state too.
9307 
9308     return T.isValidState();
9309   }
9310 
9311   bool calculateCastInst(
9312       Attributor &A, CastInst *CastI, IntegerRangeState &T,
9313       const Instruction *CtxI,
9314       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9315     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
9316     // TODO: Allow non integers as well.
9317     Value *OpV = CastI->getOperand(0);
9318 
9319     // Simplify the operand first.
9320     bool UsedAssumedInformation = false;
9321     const auto &SimplifiedOpV = A.getAssumedSimplified(
9322         IRPosition::value(*OpV, getCallBaseContext()), *this,
9323         UsedAssumedInformation, AA::Interprocedural);
9324     if (!SimplifiedOpV.has_value())
9325       return true;
9326     if (!*SimplifiedOpV)
9327       return false;
9328     OpV = *SimplifiedOpV;
9329 
9330     if (!OpV->getType()->isIntegerTy())
9331       return false;
9332 
9333     auto *OpAA = A.getAAFor<AAValueConstantRange>(
9334         *this, IRPosition::value(*OpV, getCallBaseContext()),
9335         DepClassTy::REQUIRED);
9336     if (!OpAA)
9337       return false;
9338     QuerriedAAs.push_back(OpAA);
9339     T.unionAssumed(OpAA->getAssumed().castOp(CastI->getOpcode(),
9340                                              getState().getBitWidth()));
9341     return T.isValidState();
9342   }
9343 
9344   bool
9345   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
9346                    const Instruction *CtxI,
9347                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9348     Value *LHS = CmpI->getOperand(0);
9349     Value *RHS = CmpI->getOperand(1);
9350 
9351     // Simplify the operands first.
9352     bool UsedAssumedInformation = false;
9353     const auto &SimplifiedLHS = A.getAssumedSimplified(
9354         IRPosition::value(*LHS, getCallBaseContext()), *this,
9355         UsedAssumedInformation, AA::Interprocedural);
9356     if (!SimplifiedLHS.has_value())
9357       return true;
9358     if (!*SimplifiedLHS)
9359       return false;
9360     LHS = *SimplifiedLHS;
9361 
9362     const auto &SimplifiedRHS = A.getAssumedSimplified(
9363         IRPosition::value(*RHS, getCallBaseContext()), *this,
9364         UsedAssumedInformation, AA::Interprocedural);
9365     if (!SimplifiedRHS.has_value())
9366       return true;
9367     if (!*SimplifiedRHS)
9368       return false;
9369     RHS = *SimplifiedRHS;
9370 
9371     // TODO: Allow non integers as well.
9372     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9373       return false;
9374 
9375     auto *LHSAA = A.getAAFor<AAValueConstantRange>(
9376         *this, IRPosition::value(*LHS, getCallBaseContext()),
9377         DepClassTy::REQUIRED);
9378     if (!LHSAA)
9379       return false;
9380     QuerriedAAs.push_back(LHSAA);
9381     auto *RHSAA = A.getAAFor<AAValueConstantRange>(
9382         *this, IRPosition::value(*RHS, getCallBaseContext()),
9383         DepClassTy::REQUIRED);
9384     if (!RHSAA)
9385       return false;
9386     QuerriedAAs.push_back(RHSAA);
9387     auto LHSAARange = LHSAA->getAssumedConstantRange(A, CtxI);
9388     auto RHSAARange = RHSAA->getAssumedConstantRange(A, CtxI);
9389 
9390     // If one of them is empty set, we can't decide.
9391     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
9392       return true;
9393 
9394     bool MustTrue = false, MustFalse = false;
9395 
9396     auto AllowedRegion =
9397         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
9398 
9399     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
9400       MustFalse = true;
9401 
9402     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
9403       MustTrue = true;
9404 
9405     assert((!MustTrue || !MustFalse) &&
9406            "Either MustTrue or MustFalse should be false!");
9407 
9408     if (MustTrue)
9409       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
9410     else if (MustFalse)
9411       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
9412     else
9413       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
9414 
9415     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " after "
9416                       << (MustTrue ? "true" : (MustFalse ? "false" : "unknown"))
9417                       << ": " << T << "\n\t" << *LHSAA << "\t<op>\n\t"
9418                       << *RHSAA);
9419 
9420     // TODO: Track a known state too.
9421     return T.isValidState();
9422   }
9423 
9424   /// See AbstractAttribute::updateImpl(...).
9425   ChangeStatus updateImpl(Attributor &A) override {
9426 
9427     IntegerRangeState T(getBitWidth());
9428     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
9429       Instruction *I = dyn_cast<Instruction>(&V);
9430       if (!I || isa<CallBase>(I)) {
9431 
9432         // Simplify the operand first.
9433         bool UsedAssumedInformation = false;
9434         const auto &SimplifiedOpV = A.getAssumedSimplified(
9435             IRPosition::value(V, getCallBaseContext()), *this,
9436             UsedAssumedInformation, AA::Interprocedural);
9437         if (!SimplifiedOpV.has_value())
9438           return true;
9439         if (!*SimplifiedOpV)
9440           return false;
9441         Value *VPtr = *SimplifiedOpV;
9442 
9443         // If the value is not instruction, we query AA to Attributor.
9444         const auto *AA = A.getAAFor<AAValueConstantRange>(
9445             *this, IRPosition::value(*VPtr, getCallBaseContext()),
9446             DepClassTy::REQUIRED);
9447 
9448         // Clamp operator is not used to utilize a program point CtxI.
9449         if (AA)
9450           T.unionAssumed(AA->getAssumedConstantRange(A, CtxI));
9451         else
9452           return false;
9453 
9454         return T.isValidState();
9455       }
9456 
9457       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
9458       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
9459         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
9460           return false;
9461       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
9462         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
9463           return false;
9464       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
9465         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
9466           return false;
9467       } else {
9468         // Give up with other instructions.
9469         // TODO: Add other instructions
9470 
9471         T.indicatePessimisticFixpoint();
9472         return false;
9473       }
9474 
9475       // Catch circular reasoning in a pessimistic way for now.
9476       // TODO: Check how the range evolves and if we stripped anything, see also
9477       //       AADereferenceable or AAAlign for similar situations.
9478       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
9479         if (QueriedAA != this)
9480           continue;
9481         // If we are in a stady state we do not need to worry.
9482         if (T.getAssumed() == getState().getAssumed())
9483           continue;
9484         T.indicatePessimisticFixpoint();
9485       }
9486 
9487       return T.isValidState();
9488     };
9489 
9490     if (!VisitValueCB(getAssociatedValue(), getCtxI()))
9491       return indicatePessimisticFixpoint();
9492 
9493     // Ensure that long def-use chains can't cause circular reasoning either by
9494     // introducing a cutoff below.
9495     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
9496       return ChangeStatus::UNCHANGED;
9497     if (++NumChanges > MaxNumChanges) {
9498       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
9499                         << " but only " << MaxNumChanges
9500                         << " are allowed to avoid cyclic reasoning.");
9501       return indicatePessimisticFixpoint();
9502     }
9503     return ChangeStatus::CHANGED;
9504   }
9505 
9506   /// See AbstractAttribute::trackStatistics()
9507   void trackStatistics() const override {
9508     STATS_DECLTRACK_FLOATING_ATTR(value_range)
9509   }
9510 
9511   /// Tracker to bail after too many widening steps of the constant range.
9512   int NumChanges = 0;
9513 
9514   /// Upper bound for the number of allowed changes (=widening steps) for the
9515   /// constant range before we give up.
9516   static constexpr int MaxNumChanges = 5;
9517 };
9518 
9519 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
9520   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
9521       : AAValueConstantRangeImpl(IRP, A) {}
9522 
9523   /// See AbstractAttribute::initialize(...).
9524   ChangeStatus updateImpl(Attributor &A) override {
9525     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
9526                      "not be called");
9527   }
9528 
9529   /// See AbstractAttribute::trackStatistics()
9530   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
9531 };
9532 
9533 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
9534   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
9535       : AAValueConstantRangeFunction(IRP, A) {}
9536 
9537   /// See AbstractAttribute::trackStatistics()
9538   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
9539 };
9540 
9541 struct AAValueConstantRangeCallSiteReturned
9542     : AACalleeToCallSite<AAValueConstantRange, AAValueConstantRangeImpl,
9543                          AAValueConstantRangeImpl::StateType,
9544                          /* IntroduceCallBaseContext */ true> {
9545   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
9546       : AACalleeToCallSite<AAValueConstantRange, AAValueConstantRangeImpl,
9547                            AAValueConstantRangeImpl::StateType,
9548                            /* IntroduceCallBaseContext */ true>(IRP, A) {}
9549 
9550   /// See AbstractAttribute::initialize(...).
9551   void initialize(Attributor &A) override {
9552     // If it is a load instruction with range metadata, use the metadata.
9553     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
9554       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
9555         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
9556 
9557     AAValueConstantRangeImpl::initialize(A);
9558   }
9559 
9560   /// See AbstractAttribute::trackStatistics()
9561   void trackStatistics() const override {
9562     STATS_DECLTRACK_CSRET_ATTR(value_range)
9563   }
9564 };
9565 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
9566   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
9567       : AAValueConstantRangeFloating(IRP, A) {}
9568 
9569   /// See AbstractAttribute::manifest()
9570   ChangeStatus manifest(Attributor &A) override {
9571     return ChangeStatus::UNCHANGED;
9572   }
9573 
9574   /// See AbstractAttribute::trackStatistics()
9575   void trackStatistics() const override {
9576     STATS_DECLTRACK_CSARG_ATTR(value_range)
9577   }
9578 };
9579 } // namespace
9580 
9581 /// ------------------ Potential Values Attribute -------------------------
9582 
9583 namespace {
9584 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
9585   using StateType = PotentialConstantIntValuesState;
9586 
9587   AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
9588       : AAPotentialConstantValues(IRP, A) {}
9589 
9590   /// See AbstractAttribute::initialize(..).
9591   void initialize(Attributor &A) override {
9592     if (A.hasSimplificationCallback(getIRPosition()))
9593       indicatePessimisticFixpoint();
9594     else
9595       AAPotentialConstantValues::initialize(A);
9596   }
9597 
9598   bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S,
9599                                  bool &ContainsUndef, bool ForSelf) {
9600     SmallVector<AA::ValueAndContext> Values;
9601     bool UsedAssumedInformation = false;
9602     if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural,
9603                                       UsedAssumedInformation)) {
9604       // Avoid recursion when the caller is computing constant values for this
9605       // IRP itself.
9606       if (ForSelf)
9607         return false;
9608       if (!IRP.getAssociatedType()->isIntegerTy())
9609         return false;
9610       auto *PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9611           *this, IRP, DepClassTy::REQUIRED);
9612       if (!PotentialValuesAA || !PotentialValuesAA->getState().isValidState())
9613         return false;
9614       ContainsUndef = PotentialValuesAA->getState().undefIsContained();
9615       S = PotentialValuesAA->getState().getAssumedSet();
9616       return true;
9617     }
9618 
9619     // Copy all the constant values, except UndefValue. ContainsUndef is true
9620     // iff Values contains only UndefValue instances. If there are other known
9621     // constants, then UndefValue is dropped.
9622     ContainsUndef = false;
9623     for (auto &It : Values) {
9624       if (isa<UndefValue>(It.getValue())) {
9625         ContainsUndef = true;
9626         continue;
9627       }
9628       auto *CI = dyn_cast<ConstantInt>(It.getValue());
9629       if (!CI)
9630         return false;
9631       S.insert(CI->getValue());
9632     }
9633     ContainsUndef &= S.empty();
9634 
9635     return true;
9636   }
9637 
9638   /// See AbstractAttribute::getAsStr().
9639   const std::string getAsStr(Attributor *A) const override {
9640     std::string Str;
9641     llvm::raw_string_ostream OS(Str);
9642     OS << getState();
9643     return OS.str();
9644   }
9645 
9646   /// See AbstractAttribute::updateImpl(...).
9647   ChangeStatus updateImpl(Attributor &A) override {
9648     return indicatePessimisticFixpoint();
9649   }
9650 };
9651 
9652 struct AAPotentialConstantValuesArgument final
9653     : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
9654                                       AAPotentialConstantValuesImpl,
9655                                       PotentialConstantIntValuesState> {
9656   using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
9657                                                AAPotentialConstantValuesImpl,
9658                                                PotentialConstantIntValuesState>;
9659   AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
9660       : Base(IRP, A) {}
9661 
9662   /// See AbstractAttribute::trackStatistics()
9663   void trackStatistics() const override {
9664     STATS_DECLTRACK_ARG_ATTR(potential_values)
9665   }
9666 };
9667 
9668 struct AAPotentialConstantValuesReturned
9669     : AAReturnedFromReturnedValues<AAPotentialConstantValues,
9670                                    AAPotentialConstantValuesImpl> {
9671   using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
9672                                             AAPotentialConstantValuesImpl>;
9673   AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
9674       : Base(IRP, A) {}
9675 
9676   void initialize(Attributor &A) override {
9677     if (!A.isFunctionIPOAmendable(*getAssociatedFunction()))
9678       indicatePessimisticFixpoint();
9679     Base::initialize(A);
9680   }
9681 
9682   /// See AbstractAttribute::trackStatistics()
9683   void trackStatistics() const override {
9684     STATS_DECLTRACK_FNRET_ATTR(potential_values)
9685   }
9686 };
9687 
9688 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
9689   AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
9690       : AAPotentialConstantValuesImpl(IRP, A) {}
9691 
9692   /// See AbstractAttribute::initialize(..).
9693   void initialize(Attributor &A) override {
9694     AAPotentialConstantValuesImpl::initialize(A);
9695     if (isAtFixpoint())
9696       return;
9697 
9698     Value &V = getAssociatedValue();
9699 
9700     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9701       unionAssumed(C->getValue());
9702       indicateOptimisticFixpoint();
9703       return;
9704     }
9705 
9706     if (isa<UndefValue>(&V)) {
9707       unionAssumedWithUndef();
9708       indicateOptimisticFixpoint();
9709       return;
9710     }
9711 
9712     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
9713       return;
9714 
9715     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9716       return;
9717 
9718     indicatePessimisticFixpoint();
9719 
9720     LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9721                       << getAssociatedValue() << "\n");
9722   }
9723 
9724   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9725                                 const APInt &RHS) {
9726     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9727   }
9728 
9729   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9730                                  uint32_t ResultBitWidth) {
9731     Instruction::CastOps CastOp = CI->getOpcode();
9732     switch (CastOp) {
9733     default:
9734       llvm_unreachable("unsupported or not integer cast");
9735     case Instruction::Trunc:
9736       return Src.trunc(ResultBitWidth);
9737     case Instruction::SExt:
9738       return Src.sext(ResultBitWidth);
9739     case Instruction::ZExt:
9740       return Src.zext(ResultBitWidth);
9741     case Instruction::BitCast:
9742       return Src;
9743     }
9744   }
9745 
9746   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9747                                        const APInt &LHS, const APInt &RHS,
9748                                        bool &SkipOperation, bool &Unsupported) {
9749     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9750     // Unsupported is set to true when the binary operator is not supported.
9751     // SkipOperation is set to true when UB occur with the given operand pair
9752     // (LHS, RHS).
9753     // TODO: we should look at nsw and nuw keywords to handle operations
9754     //       that create poison or undef value.
9755     switch (BinOpcode) {
9756     default:
9757       Unsupported = true;
9758       return LHS;
9759     case Instruction::Add:
9760       return LHS + RHS;
9761     case Instruction::Sub:
9762       return LHS - RHS;
9763     case Instruction::Mul:
9764       return LHS * RHS;
9765     case Instruction::UDiv:
9766       if (RHS.isZero()) {
9767         SkipOperation = true;
9768         return LHS;
9769       }
9770       return LHS.udiv(RHS);
9771     case Instruction::SDiv:
9772       if (RHS.isZero()) {
9773         SkipOperation = true;
9774         return LHS;
9775       }
9776       return LHS.sdiv(RHS);
9777     case Instruction::URem:
9778       if (RHS.isZero()) {
9779         SkipOperation = true;
9780         return LHS;
9781       }
9782       return LHS.urem(RHS);
9783     case Instruction::SRem:
9784       if (RHS.isZero()) {
9785         SkipOperation = true;
9786         return LHS;
9787       }
9788       return LHS.srem(RHS);
9789     case Instruction::Shl:
9790       return LHS.shl(RHS);
9791     case Instruction::LShr:
9792       return LHS.lshr(RHS);
9793     case Instruction::AShr:
9794       return LHS.ashr(RHS);
9795     case Instruction::And:
9796       return LHS & RHS;
9797     case Instruction::Or:
9798       return LHS | RHS;
9799     case Instruction::Xor:
9800       return LHS ^ RHS;
9801     }
9802   }
9803 
9804   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9805                                            const APInt &LHS, const APInt &RHS) {
9806     bool SkipOperation = false;
9807     bool Unsupported = false;
9808     APInt Result =
9809         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9810     if (Unsupported)
9811       return false;
9812     // If SkipOperation is true, we can ignore this operand pair (L, R).
9813     if (!SkipOperation)
9814       unionAssumed(Result);
9815     return isValidState();
9816   }
9817 
9818   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9819     auto AssumedBefore = getAssumed();
9820     Value *LHS = ICI->getOperand(0);
9821     Value *RHS = ICI->getOperand(1);
9822 
9823     bool LHSContainsUndef = false, RHSContainsUndef = false;
9824     SetTy LHSAAPVS, RHSAAPVS;
9825     if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9826                                    LHSContainsUndef, /* ForSelf */ false) ||
9827         !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9828                                    RHSContainsUndef, /* ForSelf */ false))
9829       return indicatePessimisticFixpoint();
9830 
9831     // TODO: make use of undef flag to limit potential values aggressively.
9832     bool MaybeTrue = false, MaybeFalse = false;
9833     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9834     if (LHSContainsUndef && RHSContainsUndef) {
9835       // The result of any comparison between undefs can be soundly replaced
9836       // with undef.
9837       unionAssumedWithUndef();
9838     } else if (LHSContainsUndef) {
9839       for (const APInt &R : RHSAAPVS) {
9840         bool CmpResult = calculateICmpInst(ICI, Zero, R);
9841         MaybeTrue |= CmpResult;
9842         MaybeFalse |= !CmpResult;
9843         if (MaybeTrue & MaybeFalse)
9844           return indicatePessimisticFixpoint();
9845       }
9846     } else if (RHSContainsUndef) {
9847       for (const APInt &L : LHSAAPVS) {
9848         bool CmpResult = calculateICmpInst(ICI, L, Zero);
9849         MaybeTrue |= CmpResult;
9850         MaybeFalse |= !CmpResult;
9851         if (MaybeTrue & MaybeFalse)
9852           return indicatePessimisticFixpoint();
9853       }
9854     } else {
9855       for (const APInt &L : LHSAAPVS) {
9856         for (const APInt &R : RHSAAPVS) {
9857           bool CmpResult = calculateICmpInst(ICI, L, R);
9858           MaybeTrue |= CmpResult;
9859           MaybeFalse |= !CmpResult;
9860           if (MaybeTrue & MaybeFalse)
9861             return indicatePessimisticFixpoint();
9862         }
9863       }
9864     }
9865     if (MaybeTrue)
9866       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9867     if (MaybeFalse)
9868       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9869     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9870                                          : ChangeStatus::CHANGED;
9871   }
9872 
9873   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9874     auto AssumedBefore = getAssumed();
9875     Value *LHS = SI->getTrueValue();
9876     Value *RHS = SI->getFalseValue();
9877 
9878     bool UsedAssumedInformation = false;
9879     std::optional<Constant *> C = A.getAssumedConstant(
9880         *SI->getCondition(), *this, UsedAssumedInformation);
9881 
9882     // Check if we only need one operand.
9883     bool OnlyLeft = false, OnlyRight = false;
9884     if (C && *C && (*C)->isOneValue())
9885       OnlyLeft = true;
9886     else if (C && *C && (*C)->isZeroValue())
9887       OnlyRight = true;
9888 
9889     bool LHSContainsUndef = false, RHSContainsUndef = false;
9890     SetTy LHSAAPVS, RHSAAPVS;
9891     if (!OnlyRight &&
9892         !fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9893                                    LHSContainsUndef, /* ForSelf */ false))
9894       return indicatePessimisticFixpoint();
9895 
9896     if (!OnlyLeft &&
9897         !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9898                                    RHSContainsUndef, /* ForSelf */ false))
9899       return indicatePessimisticFixpoint();
9900 
9901     if (OnlyLeft || OnlyRight) {
9902       // select (true/false), lhs, rhs
9903       auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS;
9904       auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef;
9905 
9906       if (Undef)
9907         unionAssumedWithUndef();
9908       else {
9909         for (const auto &It : *OpAA)
9910           unionAssumed(It);
9911       }
9912 
9913     } else if (LHSContainsUndef && RHSContainsUndef) {
9914       // select i1 *, undef , undef => undef
9915       unionAssumedWithUndef();
9916     } else {
9917       for (const auto &It : LHSAAPVS)
9918         unionAssumed(It);
9919       for (const auto &It : RHSAAPVS)
9920         unionAssumed(It);
9921     }
9922     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9923                                          : ChangeStatus::CHANGED;
9924   }
9925 
9926   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9927     auto AssumedBefore = getAssumed();
9928     if (!CI->isIntegerCast())
9929       return indicatePessimisticFixpoint();
9930     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9931     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9932     Value *Src = CI->getOperand(0);
9933 
9934     bool SrcContainsUndef = false;
9935     SetTy SrcPVS;
9936     if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS,
9937                                    SrcContainsUndef, /* ForSelf */ false))
9938       return indicatePessimisticFixpoint();
9939 
9940     if (SrcContainsUndef)
9941       unionAssumedWithUndef();
9942     else {
9943       for (const APInt &S : SrcPVS) {
9944         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9945         unionAssumed(T);
9946       }
9947     }
9948     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9949                                          : ChangeStatus::CHANGED;
9950   }
9951 
9952   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9953     auto AssumedBefore = getAssumed();
9954     Value *LHS = BinOp->getOperand(0);
9955     Value *RHS = BinOp->getOperand(1);
9956 
9957     bool LHSContainsUndef = false, RHSContainsUndef = false;
9958     SetTy LHSAAPVS, RHSAAPVS;
9959     if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9960                                    LHSContainsUndef, /* ForSelf */ false) ||
9961         !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9962                                    RHSContainsUndef, /* ForSelf */ false))
9963       return indicatePessimisticFixpoint();
9964 
9965     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9966 
9967     // TODO: make use of undef flag to limit potential values aggressively.
9968     if (LHSContainsUndef && RHSContainsUndef) {
9969       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9970         return indicatePessimisticFixpoint();
9971     } else if (LHSContainsUndef) {
9972       for (const APInt &R : RHSAAPVS) {
9973         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9974           return indicatePessimisticFixpoint();
9975       }
9976     } else if (RHSContainsUndef) {
9977       for (const APInt &L : LHSAAPVS) {
9978         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9979           return indicatePessimisticFixpoint();
9980       }
9981     } else {
9982       for (const APInt &L : LHSAAPVS) {
9983         for (const APInt &R : RHSAAPVS) {
9984           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9985             return indicatePessimisticFixpoint();
9986         }
9987       }
9988     }
9989     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9990                                          : ChangeStatus::CHANGED;
9991   }
9992 
9993   ChangeStatus updateWithInstruction(Attributor &A, Instruction *Inst) {
9994     auto AssumedBefore = getAssumed();
9995     SetTy Incoming;
9996     bool ContainsUndef;
9997     if (!fillSetWithConstantValues(A, IRPosition::value(*Inst), Incoming,
9998                                    ContainsUndef, /* ForSelf */ true))
9999       return indicatePessimisticFixpoint();
10000     if (ContainsUndef) {
10001       unionAssumedWithUndef();
10002     } else {
10003       for (const auto &It : Incoming)
10004         unionAssumed(It);
10005     }
10006     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10007                                          : ChangeStatus::CHANGED;
10008   }
10009 
10010   /// See AbstractAttribute::updateImpl(...).
10011   ChangeStatus updateImpl(Attributor &A) override {
10012     Value &V = getAssociatedValue();
10013     Instruction *I = dyn_cast<Instruction>(&V);
10014 
10015     if (auto *ICI = dyn_cast<ICmpInst>(I))
10016       return updateWithICmpInst(A, ICI);
10017 
10018     if (auto *SI = dyn_cast<SelectInst>(I))
10019       return updateWithSelectInst(A, SI);
10020 
10021     if (auto *CI = dyn_cast<CastInst>(I))
10022       return updateWithCastInst(A, CI);
10023 
10024     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
10025       return updateWithBinaryOperator(A, BinOp);
10026 
10027     if (isa<PHINode>(I) || isa<LoadInst>(I))
10028       return updateWithInstruction(A, I);
10029 
10030     return indicatePessimisticFixpoint();
10031   }
10032 
10033   /// See AbstractAttribute::trackStatistics()
10034   void trackStatistics() const override {
10035     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
10036   }
10037 };
10038 
10039 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
10040   AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
10041       : AAPotentialConstantValuesImpl(IRP, A) {}
10042 
10043   /// See AbstractAttribute::initialize(...).
10044   ChangeStatus updateImpl(Attributor &A) override {
10045     llvm_unreachable(
10046         "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
10047         "not be called");
10048   }
10049 
10050   /// See AbstractAttribute::trackStatistics()
10051   void trackStatistics() const override {
10052     STATS_DECLTRACK_FN_ATTR(potential_values)
10053   }
10054 };
10055 
10056 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
10057   AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
10058       : AAPotentialConstantValuesFunction(IRP, A) {}
10059 
10060   /// See AbstractAttribute::trackStatistics()
10061   void trackStatistics() const override {
10062     STATS_DECLTRACK_CS_ATTR(potential_values)
10063   }
10064 };
10065 
10066 struct AAPotentialConstantValuesCallSiteReturned
10067     : AACalleeToCallSite<AAPotentialConstantValues,
10068                          AAPotentialConstantValuesImpl> {
10069   AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
10070                                             Attributor &A)
10071       : AACalleeToCallSite<AAPotentialConstantValues,
10072                            AAPotentialConstantValuesImpl>(IRP, A) {}
10073 
10074   /// See AbstractAttribute::trackStatistics()
10075   void trackStatistics() const override {
10076     STATS_DECLTRACK_CSRET_ATTR(potential_values)
10077   }
10078 };
10079 
10080 struct AAPotentialConstantValuesCallSiteArgument
10081     : AAPotentialConstantValuesFloating {
10082   AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
10083                                             Attributor &A)
10084       : AAPotentialConstantValuesFloating(IRP, A) {}
10085 
10086   /// See AbstractAttribute::initialize(..).
10087   void initialize(Attributor &A) override {
10088     AAPotentialConstantValuesImpl::initialize(A);
10089     if (isAtFixpoint())
10090       return;
10091 
10092     Value &V = getAssociatedValue();
10093 
10094     if (auto *C = dyn_cast<ConstantInt>(&V)) {
10095       unionAssumed(C->getValue());
10096       indicateOptimisticFixpoint();
10097       return;
10098     }
10099 
10100     if (isa<UndefValue>(&V)) {
10101       unionAssumedWithUndef();
10102       indicateOptimisticFixpoint();
10103       return;
10104     }
10105   }
10106 
10107   /// See AbstractAttribute::updateImpl(...).
10108   ChangeStatus updateImpl(Attributor &A) override {
10109     Value &V = getAssociatedValue();
10110     auto AssumedBefore = getAssumed();
10111     auto *AA = A.getAAFor<AAPotentialConstantValues>(
10112         *this, IRPosition::value(V), DepClassTy::REQUIRED);
10113     if (!AA)
10114       return indicatePessimisticFixpoint();
10115     const auto &S = AA->getAssumed();
10116     unionAssumed(S);
10117     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10118                                          : ChangeStatus::CHANGED;
10119   }
10120 
10121   /// See AbstractAttribute::trackStatistics()
10122   void trackStatistics() const override {
10123     STATS_DECLTRACK_CSARG_ATTR(potential_values)
10124   }
10125 };
10126 } // namespace
10127 
10128 /// ------------------------ NoUndef Attribute ---------------------------------
10129 bool AANoUndef::isImpliedByIR(Attributor &A, const IRPosition &IRP,
10130                               Attribute::AttrKind ImpliedAttributeKind,
10131                               bool IgnoreSubsumingPositions) {
10132   assert(ImpliedAttributeKind == Attribute::NoUndef &&
10133          "Unexpected attribute kind");
10134   if (A.hasAttr(IRP, {Attribute::NoUndef}, IgnoreSubsumingPositions,
10135                 Attribute::NoUndef))
10136     return true;
10137 
10138   Value &Val = IRP.getAssociatedValue();
10139   if (IRP.getPositionKind() != IRPosition::IRP_RETURNED &&
10140       isGuaranteedNotToBeUndefOrPoison(&Val)) {
10141     LLVMContext &Ctx = Val.getContext();
10142     A.manifestAttrs(IRP, Attribute::get(Ctx, Attribute::NoUndef));
10143     return true;
10144   }
10145 
10146   return false;
10147 }
10148 
10149 namespace {
10150 struct AANoUndefImpl : AANoUndef {
10151   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
10152 
10153   /// See AbstractAttribute::initialize(...).
10154   void initialize(Attributor &A) override {
10155     Value &V = getAssociatedValue();
10156     if (isa<UndefValue>(V))
10157       indicatePessimisticFixpoint();
10158     assert(!isImpliedByIR(A, getIRPosition(), Attribute::NoUndef));
10159   }
10160 
10161   /// See followUsesInMBEC
10162   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
10163                        AANoUndef::StateType &State) {
10164     const Value *UseV = U->get();
10165     const DominatorTree *DT = nullptr;
10166     AssumptionCache *AC = nullptr;
10167     InformationCache &InfoCache = A.getInfoCache();
10168     if (Function *F = getAnchorScope()) {
10169       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
10170       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
10171     }
10172     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
10173     bool TrackUse = false;
10174     // Track use for instructions which must produce undef or poison bits when
10175     // at least one operand contains such bits.
10176     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
10177       TrackUse = true;
10178     return TrackUse;
10179   }
10180 
10181   /// See AbstractAttribute::getAsStr().
10182   const std::string getAsStr(Attributor *A) const override {
10183     return getAssumed() ? "noundef" : "may-undef-or-poison";
10184   }
10185 
10186   ChangeStatus manifest(Attributor &A) override {
10187     // We don't manifest noundef attribute for dead positions because the
10188     // associated values with dead positions would be replaced with undef
10189     // values.
10190     bool UsedAssumedInformation = false;
10191     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
10192                         UsedAssumedInformation))
10193       return ChangeStatus::UNCHANGED;
10194     // A position whose simplified value does not have any value is
10195     // considered to be dead. We don't manifest noundef in such positions for
10196     // the same reason above.
10197     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation,
10198                                 AA::Interprocedural)
10199              .has_value())
10200       return ChangeStatus::UNCHANGED;
10201     return AANoUndef::manifest(A);
10202   }
10203 };
10204 
10205 struct AANoUndefFloating : public AANoUndefImpl {
10206   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
10207       : AANoUndefImpl(IRP, A) {}
10208 
10209   /// See AbstractAttribute::initialize(...).
10210   void initialize(Attributor &A) override {
10211     AANoUndefImpl::initialize(A);
10212     if (!getState().isAtFixpoint() && getAnchorScope() &&
10213         !getAnchorScope()->isDeclaration())
10214       if (Instruction *CtxI = getCtxI())
10215         followUsesInMBEC(*this, A, getState(), *CtxI);
10216   }
10217 
10218   /// See AbstractAttribute::updateImpl(...).
10219   ChangeStatus updateImpl(Attributor &A) override {
10220     auto VisitValueCB = [&](const IRPosition &IRP) -> bool {
10221       bool IsKnownNoUndef;
10222       return AA::hasAssumedIRAttr<Attribute::NoUndef>(
10223           A, this, IRP, DepClassTy::REQUIRED, IsKnownNoUndef);
10224     };
10225 
10226     bool Stripped;
10227     bool UsedAssumedInformation = false;
10228     Value *AssociatedValue = &getAssociatedValue();
10229     SmallVector<AA::ValueAndContext> Values;
10230     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
10231                                       AA::AnyScope, UsedAssumedInformation))
10232       Stripped = false;
10233     else
10234       Stripped =
10235           Values.size() != 1 || Values.front().getValue() != AssociatedValue;
10236 
10237     if (!Stripped) {
10238       // If we haven't stripped anything we might still be able to use a
10239       // different AA, but only if the IRP changes. Effectively when we
10240       // interpret this not as a call site value but as a floating/argument
10241       // value.
10242       const IRPosition AVIRP = IRPosition::value(*AssociatedValue);
10243       if (AVIRP == getIRPosition() || !VisitValueCB(AVIRP))
10244         return indicatePessimisticFixpoint();
10245       return ChangeStatus::UNCHANGED;
10246     }
10247 
10248     for (const auto &VAC : Values)
10249       if (!VisitValueCB(IRPosition::value(*VAC.getValue())))
10250         return indicatePessimisticFixpoint();
10251 
10252     return ChangeStatus::UNCHANGED;
10253   }
10254 
10255   /// See AbstractAttribute::trackStatistics()
10256   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
10257 };
10258 
10259 struct AANoUndefReturned final
10260     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
10261   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
10262       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
10263 
10264   /// See AbstractAttribute::trackStatistics()
10265   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
10266 };
10267 
10268 struct AANoUndefArgument final
10269     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
10270   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
10271       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
10272 
10273   /// See AbstractAttribute::trackStatistics()
10274   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
10275 };
10276 
10277 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
10278   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
10279       : AANoUndefFloating(IRP, A) {}
10280 
10281   /// See AbstractAttribute::trackStatistics()
10282   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
10283 };
10284 
10285 struct AANoUndefCallSiteReturned final
10286     : AACalleeToCallSite<AANoUndef, AANoUndefImpl> {
10287   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
10288       : AACalleeToCallSite<AANoUndef, AANoUndefImpl>(IRP, A) {}
10289 
10290   /// See AbstractAttribute::trackStatistics()
10291   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
10292 };
10293 
10294 /// ------------------------ NoFPClass Attribute -------------------------------
10295 
10296 struct AANoFPClassImpl : AANoFPClass {
10297   AANoFPClassImpl(const IRPosition &IRP, Attributor &A) : AANoFPClass(IRP, A) {}
10298 
10299   void initialize(Attributor &A) override {
10300     const IRPosition &IRP = getIRPosition();
10301 
10302     Value &V = IRP.getAssociatedValue();
10303     if (isa<UndefValue>(V)) {
10304       indicateOptimisticFixpoint();
10305       return;
10306     }
10307 
10308     SmallVector<Attribute> Attrs;
10309     A.getAttrs(getIRPosition(), {Attribute::NoFPClass}, Attrs, false);
10310     for (const auto &Attr : Attrs) {
10311       addKnownBits(Attr.getNoFPClass());
10312     }
10313 
10314     const DataLayout &DL = A.getDataLayout();
10315     if (getPositionKind() != IRPosition::IRP_RETURNED) {
10316       KnownFPClass KnownFPClass = computeKnownFPClass(&V, DL);
10317       addKnownBits(~KnownFPClass.KnownFPClasses);
10318     }
10319 
10320     if (Instruction *CtxI = getCtxI())
10321       followUsesInMBEC(*this, A, getState(), *CtxI);
10322   }
10323 
10324   /// See followUsesInMBEC
10325   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
10326                        AANoFPClass::StateType &State) {
10327     const Value *UseV = U->get();
10328     const DominatorTree *DT = nullptr;
10329     AssumptionCache *AC = nullptr;
10330     const TargetLibraryInfo *TLI = nullptr;
10331     InformationCache &InfoCache = A.getInfoCache();
10332 
10333     if (Function *F = getAnchorScope()) {
10334       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
10335       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
10336       TLI = InfoCache.getTargetLibraryInfoForFunction(*F);
10337     }
10338 
10339     const DataLayout &DL = A.getDataLayout();
10340 
10341     KnownFPClass KnownFPClass =
10342         computeKnownFPClass(UseV, DL,
10343                             /*InterestedClasses=*/fcAllFlags,
10344                             /*Depth=*/0, TLI, AC, I, DT);
10345     State.addKnownBits(~KnownFPClass.KnownFPClasses);
10346 
10347     if (auto *CI = dyn_cast<CallInst>(UseV)) {
10348       // Special case FP intrinsic with struct return type.
10349       switch (CI->getIntrinsicID()) {
10350       case Intrinsic::frexp:
10351         return true;
10352       case Intrinsic::not_intrinsic:
10353         // TODO: Could recognize math libcalls
10354         return false;
10355       default:
10356         break;
10357       }
10358     }
10359 
10360     if (!UseV->getType()->isFPOrFPVectorTy())
10361       return false;
10362     return !isa<LoadInst, AtomicRMWInst>(UseV);
10363   }
10364 
10365   const std::string getAsStr(Attributor *A) const override {
10366     std::string Result = "nofpclass";
10367     raw_string_ostream OS(Result);
10368     OS << getAssumedNoFPClass();
10369     return Result;
10370   }
10371 
10372   void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
10373                             SmallVectorImpl<Attribute> &Attrs) const override {
10374     Attrs.emplace_back(Attribute::getWithNoFPClass(Ctx, getAssumedNoFPClass()));
10375   }
10376 };
10377 
10378 struct AANoFPClassFloating : public AANoFPClassImpl {
10379   AANoFPClassFloating(const IRPosition &IRP, Attributor &A)
10380       : AANoFPClassImpl(IRP, A) {}
10381 
10382   /// See AbstractAttribute::updateImpl(...).
10383   ChangeStatus updateImpl(Attributor &A) override {
10384     SmallVector<AA::ValueAndContext> Values;
10385     bool UsedAssumedInformation = false;
10386     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
10387                                       AA::AnyScope, UsedAssumedInformation)) {
10388       Values.push_back({getAssociatedValue(), getCtxI()});
10389     }
10390 
10391     StateType T;
10392     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
10393       const auto *AA = A.getAAFor<AANoFPClass>(*this, IRPosition::value(V),
10394                                                DepClassTy::REQUIRED);
10395       if (!AA || this == AA) {
10396         T.indicatePessimisticFixpoint();
10397       } else {
10398         const AANoFPClass::StateType &S =
10399             static_cast<const AANoFPClass::StateType &>(AA->getState());
10400         T ^= S;
10401       }
10402       return T.isValidState();
10403     };
10404 
10405     for (const auto &VAC : Values)
10406       if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
10407         return indicatePessimisticFixpoint();
10408 
10409     return clampStateAndIndicateChange(getState(), T);
10410   }
10411 
10412   /// See AbstractAttribute::trackStatistics()
10413   void trackStatistics() const override {
10414     STATS_DECLTRACK_FNRET_ATTR(nofpclass)
10415   }
10416 };
10417 
10418 struct AANoFPClassReturned final
10419     : AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl,
10420                                    AANoFPClassImpl::StateType, false, Attribute::None, false> {
10421   AANoFPClassReturned(const IRPosition &IRP, Attributor &A)
10422       : AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl,
10423                                      AANoFPClassImpl::StateType, false, Attribute::None, false>(
10424             IRP, A) {}
10425 
10426   /// See AbstractAttribute::trackStatistics()
10427   void trackStatistics() const override {
10428     STATS_DECLTRACK_FNRET_ATTR(nofpclass)
10429   }
10430 };
10431 
10432 struct AANoFPClassArgument final
10433     : AAArgumentFromCallSiteArguments<AANoFPClass, AANoFPClassImpl> {
10434   AANoFPClassArgument(const IRPosition &IRP, Attributor &A)
10435       : AAArgumentFromCallSiteArguments<AANoFPClass, AANoFPClassImpl>(IRP, A) {}
10436 
10437   /// See AbstractAttribute::trackStatistics()
10438   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofpclass) }
10439 };
10440 
10441 struct AANoFPClassCallSiteArgument final : AANoFPClassFloating {
10442   AANoFPClassCallSiteArgument(const IRPosition &IRP, Attributor &A)
10443       : AANoFPClassFloating(IRP, A) {}
10444 
10445   /// See AbstractAttribute::trackStatistics()
10446   void trackStatistics() const override {
10447     STATS_DECLTRACK_CSARG_ATTR(nofpclass)
10448   }
10449 };
10450 
10451 struct AANoFPClassCallSiteReturned final
10452     : AACalleeToCallSite<AANoFPClass, AANoFPClassImpl> {
10453   AANoFPClassCallSiteReturned(const IRPosition &IRP, Attributor &A)
10454       : AACalleeToCallSite<AANoFPClass, AANoFPClassImpl>(IRP, A) {}
10455 
10456   /// See AbstractAttribute::trackStatistics()
10457   void trackStatistics() const override {
10458     STATS_DECLTRACK_CSRET_ATTR(nofpclass)
10459   }
10460 };
10461 
10462 struct AACallEdgesImpl : public AACallEdges {
10463   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
10464 
10465   const SetVector<Function *> &getOptimisticEdges() const override {
10466     return CalledFunctions;
10467   }
10468 
10469   bool hasUnknownCallee() const override { return HasUnknownCallee; }
10470 
10471   bool hasNonAsmUnknownCallee() const override {
10472     return HasUnknownCalleeNonAsm;
10473   }
10474 
10475   const std::string getAsStr(Attributor *A) const override {
10476     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
10477            std::to_string(CalledFunctions.size()) + "]";
10478   }
10479 
10480   void trackStatistics() const override {}
10481 
10482 protected:
10483   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
10484     if (CalledFunctions.insert(Fn)) {
10485       Change = ChangeStatus::CHANGED;
10486       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
10487                         << "\n");
10488     }
10489   }
10490 
10491   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
10492     if (!HasUnknownCallee)
10493       Change = ChangeStatus::CHANGED;
10494     if (NonAsm && !HasUnknownCalleeNonAsm)
10495       Change = ChangeStatus::CHANGED;
10496     HasUnknownCalleeNonAsm |= NonAsm;
10497     HasUnknownCallee = true;
10498   }
10499 
10500 private:
10501   /// Optimistic set of functions that might be called by this position.
10502   SetVector<Function *> CalledFunctions;
10503 
10504   /// Is there any call with a unknown callee.
10505   bool HasUnknownCallee = false;
10506 
10507   /// Is there any call with a unknown callee, excluding any inline asm.
10508   bool HasUnknownCalleeNonAsm = false;
10509 };
10510 
10511 struct AACallEdgesCallSite : public AACallEdgesImpl {
10512   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
10513       : AACallEdgesImpl(IRP, A) {}
10514   /// See AbstractAttribute::updateImpl(...).
10515   ChangeStatus updateImpl(Attributor &A) override {
10516     ChangeStatus Change = ChangeStatus::UNCHANGED;
10517 
10518     auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool {
10519       if (Function *Fn = dyn_cast<Function>(&V)) {
10520         addCalledFunction(Fn, Change);
10521       } else {
10522         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
10523         setHasUnknownCallee(true, Change);
10524       }
10525 
10526       // Explore all values.
10527       return true;
10528     };
10529 
10530     SmallVector<AA::ValueAndContext> Values;
10531     // Process any value that we might call.
10532     auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) {
10533       if (isa<Constant>(V)) {
10534         VisitValue(*V, CtxI);
10535         return;
10536       }
10537 
10538       bool UsedAssumedInformation = false;
10539       Values.clear();
10540       if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values,
10541                                         AA::AnyScope, UsedAssumedInformation)) {
10542         Values.push_back({*V, CtxI});
10543       }
10544       for (auto &VAC : Values)
10545         VisitValue(*VAC.getValue(), VAC.getCtxI());
10546     };
10547 
10548     CallBase *CB = cast<CallBase>(getCtxI());
10549 
10550     if (auto *IA = dyn_cast<InlineAsm>(CB->getCalledOperand())) {
10551       if (IA->hasSideEffects() &&
10552           !hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
10553           !hasAssumption(*CB, "ompx_no_call_asm")) {
10554         setHasUnknownCallee(false, Change);
10555       }
10556       return Change;
10557     }
10558 
10559     if (CB->isIndirectCall())
10560       if (auto *IndirectCallAA = A.getAAFor<AAIndirectCallInfo>(
10561               *this, getIRPosition(), DepClassTy::OPTIONAL))
10562         if (IndirectCallAA->foreachCallee(
10563                 [&](Function *Fn) { return VisitValue(*Fn, CB); }))
10564           return Change;
10565 
10566     // The most simple case.
10567     ProcessCalledOperand(CB->getCalledOperand(), CB);
10568 
10569     // Process callback functions.
10570     SmallVector<const Use *, 4u> CallbackUses;
10571     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
10572     for (const Use *U : CallbackUses)
10573       ProcessCalledOperand(U->get(), CB);
10574 
10575     return Change;
10576   }
10577 };
10578 
10579 struct AACallEdgesFunction : public AACallEdgesImpl {
10580   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
10581       : AACallEdgesImpl(IRP, A) {}
10582 
10583   /// See AbstractAttribute::updateImpl(...).
10584   ChangeStatus updateImpl(Attributor &A) override {
10585     ChangeStatus Change = ChangeStatus::UNCHANGED;
10586 
10587     auto ProcessCallInst = [&](Instruction &Inst) {
10588       CallBase &CB = cast<CallBase>(Inst);
10589 
10590       auto *CBEdges = A.getAAFor<AACallEdges>(
10591           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
10592       if (!CBEdges)
10593         return false;
10594       if (CBEdges->hasNonAsmUnknownCallee())
10595         setHasUnknownCallee(true, Change);
10596       if (CBEdges->hasUnknownCallee())
10597         setHasUnknownCallee(false, Change);
10598 
10599       for (Function *F : CBEdges->getOptimisticEdges())
10600         addCalledFunction(F, Change);
10601 
10602       return true;
10603     };
10604 
10605     // Visit all callable instructions.
10606     bool UsedAssumedInformation = false;
10607     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
10608                                            UsedAssumedInformation,
10609                                            /* CheckBBLivenessOnly */ true)) {
10610       // If we haven't looked at all call like instructions, assume that there
10611       // are unknown callees.
10612       setHasUnknownCallee(true, Change);
10613     }
10614 
10615     return Change;
10616   }
10617 };
10618 
10619 /// -------------------AAInterFnReachability Attribute--------------------------
10620 
10621 struct AAInterFnReachabilityFunction
10622     : public CachedReachabilityAA<AAInterFnReachability, Function> {
10623   using Base = CachedReachabilityAA<AAInterFnReachability, Function>;
10624   AAInterFnReachabilityFunction(const IRPosition &IRP, Attributor &A)
10625       : Base(IRP, A) {}
10626 
10627   bool instructionCanReach(
10628       Attributor &A, const Instruction &From, const Function &To,
10629       const AA::InstExclusionSetTy *ExclusionSet) const override {
10630     assert(From.getFunction() == getAnchorScope() && "Queried the wrong AA!");
10631     auto *NonConstThis = const_cast<AAInterFnReachabilityFunction *>(this);
10632 
10633     RQITy StackRQI(A, From, To, ExclusionSet, false);
10634     typename RQITy::Reachable Result;
10635     if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
10636       return NonConstThis->isReachableImpl(A, StackRQI,
10637                                            /*IsTemporaryRQI=*/true);
10638     return Result == RQITy::Reachable::Yes;
10639   }
10640 
10641   bool isReachableImpl(Attributor &A, RQITy &RQI,
10642                        bool IsTemporaryRQI) override {
10643     const Instruction *EntryI =
10644         &RQI.From->getFunction()->getEntryBlock().front();
10645     if (EntryI != RQI.From &&
10646         !instructionCanReach(A, *EntryI, *RQI.To, nullptr))
10647       return rememberResult(A, RQITy::Reachable::No, RQI, false,
10648                             IsTemporaryRQI);
10649 
10650     auto CheckReachableCallBase = [&](CallBase *CB) {
10651       auto *CBEdges = A.getAAFor<AACallEdges>(
10652           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
10653       if (!CBEdges || !CBEdges->getState().isValidState())
10654         return false;
10655       // TODO Check To backwards in this case.
10656       if (CBEdges->hasUnknownCallee())
10657         return false;
10658 
10659       for (Function *Fn : CBEdges->getOptimisticEdges()) {
10660         if (Fn == RQI.To)
10661           return false;
10662 
10663         if (Fn->isDeclaration()) {
10664           if (Fn->hasFnAttribute(Attribute::NoCallback))
10665             continue;
10666           // TODO Check To backwards in this case.
10667           return false;
10668         }
10669 
10670         if (Fn == getAnchorScope()) {
10671           if (EntryI == RQI.From)
10672             continue;
10673           return false;
10674         }
10675 
10676         const AAInterFnReachability *InterFnReachability =
10677             A.getAAFor<AAInterFnReachability>(*this, IRPosition::function(*Fn),
10678                                               DepClassTy::OPTIONAL);
10679 
10680         const Instruction &FnFirstInst = Fn->getEntryBlock().front();
10681         if (!InterFnReachability ||
10682             InterFnReachability->instructionCanReach(A, FnFirstInst, *RQI.To,
10683                                                      RQI.ExclusionSet))
10684           return false;
10685       }
10686       return true;
10687     };
10688 
10689     const auto *IntraFnReachability = A.getAAFor<AAIntraFnReachability>(
10690         *this, IRPosition::function(*RQI.From->getFunction()),
10691         DepClassTy::OPTIONAL);
10692 
10693     // Determine call like instructions that we can reach from the inst.
10694     auto CheckCallBase = [&](Instruction &CBInst) {
10695       // There are usually less nodes in the call graph, check inter function
10696       // reachability first.
10697       if (CheckReachableCallBase(cast<CallBase>(&CBInst)))
10698         return true;
10699       return IntraFnReachability && !IntraFnReachability->isAssumedReachable(
10700                                         A, *RQI.From, CBInst, RQI.ExclusionSet);
10701     };
10702 
10703     bool UsedExclusionSet = /* conservative */ true;
10704     bool UsedAssumedInformation = false;
10705     if (!A.checkForAllCallLikeInstructions(CheckCallBase, *this,
10706                                            UsedAssumedInformation,
10707                                            /* CheckBBLivenessOnly */ true))
10708       return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
10709                             IsTemporaryRQI);
10710 
10711     return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
10712                           IsTemporaryRQI);
10713   }
10714 
10715   void trackStatistics() const override {}
10716 };
10717 } // namespace
10718 
10719 template <typename AAType>
10720 static std::optional<Constant *>
10721 askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
10722                       const IRPosition &IRP, Type &Ty) {
10723   if (!Ty.isIntegerTy())
10724     return nullptr;
10725 
10726   // This will also pass the call base context.
10727   const auto *AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE);
10728   if (!AA)
10729     return nullptr;
10730 
10731   std::optional<Constant *> COpt = AA->getAssumedConstant(A);
10732 
10733   if (!COpt.has_value()) {
10734     A.recordDependence(*AA, QueryingAA, DepClassTy::OPTIONAL);
10735     return std::nullopt;
10736   }
10737   if (auto *C = *COpt) {
10738     A.recordDependence(*AA, QueryingAA, DepClassTy::OPTIONAL);
10739     return C;
10740   }
10741   return nullptr;
10742 }
10743 
10744 Value *AAPotentialValues::getSingleValue(
10745     Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP,
10746     SmallVectorImpl<AA::ValueAndContext> &Values) {
10747   Type &Ty = *IRP.getAssociatedType();
10748   std::optional<Value *> V;
10749   for (auto &It : Values) {
10750     V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
10751     if (V.has_value() && !*V)
10752       break;
10753   }
10754   if (!V.has_value())
10755     return UndefValue::get(&Ty);
10756   return *V;
10757 }
10758 
10759 namespace {
10760 struct AAPotentialValuesImpl : AAPotentialValues {
10761   using StateType = PotentialLLVMValuesState;
10762 
10763   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
10764       : AAPotentialValues(IRP, A) {}
10765 
10766   /// See AbstractAttribute::initialize(..).
10767   void initialize(Attributor &A) override {
10768     if (A.hasSimplificationCallback(getIRPosition())) {
10769       indicatePessimisticFixpoint();
10770       return;
10771     }
10772     Value *Stripped = getAssociatedValue().stripPointerCasts();
10773     auto *CE = dyn_cast<ConstantExpr>(Stripped);
10774     if (isa<Constant>(Stripped) &&
10775         (!CE || CE->getOpcode() != Instruction::ICmp)) {
10776       addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
10777                getAnchorScope());
10778       indicateOptimisticFixpoint();
10779       return;
10780     }
10781     AAPotentialValues::initialize(A);
10782   }
10783 
10784   /// See AbstractAttribute::getAsStr().
10785   const std::string getAsStr(Attributor *A) const override {
10786     std::string Str;
10787     llvm::raw_string_ostream OS(Str);
10788     OS << getState();
10789     return OS.str();
10790   }
10791 
10792   template <typename AAType>
10793   static std::optional<Value *> askOtherAA(Attributor &A,
10794                                            const AbstractAttribute &AA,
10795                                            const IRPosition &IRP, Type &Ty) {
10796     if (isa<Constant>(IRP.getAssociatedValue()))
10797       return &IRP.getAssociatedValue();
10798     std::optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
10799     if (!C)
10800       return std::nullopt;
10801     if (*C)
10802       if (auto *CC = AA::getWithType(**C, Ty))
10803         return CC;
10804     return nullptr;
10805   }
10806 
10807   virtual void addValue(Attributor &A, StateType &State, Value &V,
10808                         const Instruction *CtxI, AA::ValueScope S,
10809                         Function *AnchorScope) const {
10810 
10811     IRPosition ValIRP = IRPosition::value(V);
10812     if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
10813       for (const auto &U : CB->args()) {
10814         if (U.get() != &V)
10815           continue;
10816         ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
10817         break;
10818       }
10819     }
10820 
10821     Value *VPtr = &V;
10822     if (ValIRP.getAssociatedType()->isIntegerTy()) {
10823       Type &Ty = *getAssociatedType();
10824       std::optional<Value *> SimpleV =
10825           askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
10826       if (SimpleV.has_value() && !*SimpleV) {
10827         auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
10828             *this, ValIRP, DepClassTy::OPTIONAL);
10829         if (PotentialConstantsAA && PotentialConstantsAA->isValidState()) {
10830           for (const auto &It : PotentialConstantsAA->getAssumedSet())
10831             State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
10832           if (PotentialConstantsAA->undefIsContained())
10833             State.unionAssumed({{*UndefValue::get(&Ty), nullptr}, S});
10834           return;
10835         }
10836       }
10837       if (!SimpleV.has_value())
10838         return;
10839 
10840       if (*SimpleV)
10841         VPtr = *SimpleV;
10842     }
10843 
10844     if (isa<ConstantInt>(VPtr))
10845       CtxI = nullptr;
10846     if (!AA::isValidInScope(*VPtr, AnchorScope))
10847       S = AA::ValueScope(S | AA::Interprocedural);
10848 
10849     State.unionAssumed({{*VPtr, CtxI}, S});
10850   }
10851 
10852   /// Helper struct to tie a value+context pair together with the scope for
10853   /// which this is the simplified version.
10854   struct ItemInfo {
10855     AA::ValueAndContext I;
10856     AA::ValueScope S;
10857 
10858     bool operator==(const ItemInfo &II) const {
10859       return II.I == I && II.S == S;
10860     };
10861     bool operator<(const ItemInfo &II) const {
10862       if (I == II.I)
10863         return S < II.S;
10864       return I < II.I;
10865     };
10866   };
10867 
10868   bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) {
10869     SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap;
10870     for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) {
10871       if (!(CS & S))
10872         continue;
10873 
10874       bool UsedAssumedInformation = false;
10875       SmallVector<AA::ValueAndContext> Values;
10876       if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
10877                                         UsedAssumedInformation))
10878         return false;
10879 
10880       for (auto &It : Values)
10881         ValueScopeMap[It] += CS;
10882     }
10883     for (auto &It : ValueScopeMap)
10884       addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
10885                AA::ValueScope(It.second), getAnchorScope());
10886 
10887     return true;
10888   }
10889 
10890   void giveUpOnIntraprocedural(Attributor &A) {
10891     auto NewS = StateType::getBestState(getState());
10892     for (const auto &It : getAssumedSet()) {
10893       if (It.second == AA::Intraprocedural)
10894         continue;
10895       addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
10896                AA::Interprocedural, getAnchorScope());
10897     }
10898     assert(!undefIsContained() && "Undef should be an explicit value!");
10899     addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
10900              getAnchorScope());
10901     getState() = NewS;
10902   }
10903 
10904   /// See AbstractState::indicatePessimisticFixpoint(...).
10905   ChangeStatus indicatePessimisticFixpoint() override {
10906     getState() = StateType::getBestState(getState());
10907     getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope});
10908     AAPotentialValues::indicateOptimisticFixpoint();
10909     return ChangeStatus::CHANGED;
10910   }
10911 
10912   /// See AbstractAttribute::updateImpl(...).
10913   ChangeStatus updateImpl(Attributor &A) override {
10914     return indicatePessimisticFixpoint();
10915   }
10916 
10917   /// See AbstractAttribute::manifest(...).
10918   ChangeStatus manifest(Attributor &A) override {
10919     SmallVector<AA::ValueAndContext> Values;
10920     for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
10921       Values.clear();
10922       if (!getAssumedSimplifiedValues(A, Values, S))
10923         continue;
10924       Value &OldV = getAssociatedValue();
10925       if (isa<UndefValue>(OldV))
10926         continue;
10927       Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
10928       if (!NewV || NewV == &OldV)
10929         continue;
10930       if (getCtxI() &&
10931           !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
10932         continue;
10933       if (A.changeAfterManifest(getIRPosition(), *NewV))
10934         return ChangeStatus::CHANGED;
10935     }
10936     return ChangeStatus::UNCHANGED;
10937   }
10938 
10939   bool getAssumedSimplifiedValues(
10940       Attributor &A, SmallVectorImpl<AA::ValueAndContext> &Values,
10941       AA::ValueScope S, bool RecurseForSelectAndPHI = false) const override {
10942     if (!isValidState())
10943       return false;
10944     bool UsedAssumedInformation = false;
10945     for (const auto &It : getAssumedSet())
10946       if (It.second & S) {
10947         if (RecurseForSelectAndPHI && (isa<PHINode>(It.first.getValue()) ||
10948                                        isa<SelectInst>(It.first.getValue()))) {
10949           if (A.getAssumedSimplifiedValues(
10950                   IRPosition::inst(*cast<Instruction>(It.first.getValue())),
10951                   this, Values, S, UsedAssumedInformation))
10952             continue;
10953         }
10954         Values.push_back(It.first);
10955       }
10956     assert(!undefIsContained() && "Undef should be an explicit value!");
10957     return true;
10958   }
10959 };
10960 
10961 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
10962   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
10963       : AAPotentialValuesImpl(IRP, A) {}
10964 
10965   /// See AbstractAttribute::updateImpl(...).
10966   ChangeStatus updateImpl(Attributor &A) override {
10967     auto AssumedBefore = getAssumed();
10968 
10969     genericValueTraversal(A, &getAssociatedValue());
10970 
10971     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10972                                            : ChangeStatus::CHANGED;
10973   }
10974 
10975   /// Helper struct to remember which AAIsDead instances we actually used.
10976   struct LivenessInfo {
10977     const AAIsDead *LivenessAA = nullptr;
10978     bool AnyDead = false;
10979   };
10980 
10981   /// Check if \p Cmp is a comparison we can simplify.
10982   ///
10983   /// We handle multiple cases, one in which at least one operand is an
10984   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
10985   /// operand. Return true if successful, in that case Worklist will be updated.
10986   bool handleCmp(Attributor &A, Value &Cmp, Value *LHS, Value *RHS,
10987                  CmpInst::Predicate Pred, ItemInfo II,
10988                  SmallVectorImpl<ItemInfo> &Worklist) {
10989 
10990     // Simplify the operands first.
10991     bool UsedAssumedInformation = false;
10992     SmallVector<AA::ValueAndContext> LHSValues, RHSValues;
10993     auto GetSimplifiedValues = [&](Value &V,
10994                                    SmallVector<AA::ValueAndContext> &Values) {
10995       if (!A.getAssumedSimplifiedValues(
10996               IRPosition::value(V, getCallBaseContext()), this, Values,
10997               AA::Intraprocedural, UsedAssumedInformation)) {
10998         Values.clear();
10999         Values.push_back(AA::ValueAndContext{V, II.I.getCtxI()});
11000       }
11001       return Values.empty();
11002     };
11003     if (GetSimplifiedValues(*LHS, LHSValues))
11004       return true;
11005     if (GetSimplifiedValues(*RHS, RHSValues))
11006       return true;
11007 
11008     LLVMContext &Ctx = LHS->getContext();
11009 
11010     InformationCache &InfoCache = A.getInfoCache();
11011     Instruction *CmpI = dyn_cast<Instruction>(&Cmp);
11012     Function *F = CmpI ? CmpI->getFunction() : nullptr;
11013     const auto *DT =
11014         F ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F)
11015           : nullptr;
11016     const auto *TLI =
11017         F ? A.getInfoCache().getTargetLibraryInfoForFunction(*F) : nullptr;
11018     auto *AC =
11019         F ? InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F)
11020           : nullptr;
11021 
11022     const DataLayout &DL = A.getDataLayout();
11023     SimplifyQuery Q(DL, TLI, DT, AC, CmpI);
11024 
11025     auto CheckPair = [&](Value &LHSV, Value &RHSV) {
11026       if (isa<UndefValue>(LHSV) || isa<UndefValue>(RHSV)) {
11027         addValue(A, getState(), *UndefValue::get(Cmp.getType()),
11028                  /* CtxI */ nullptr, II.S, getAnchorScope());
11029         return true;
11030       }
11031 
11032       // Handle the trivial case first in which we don't even need to think
11033       // about null or non-null.
11034       if (&LHSV == &RHSV &&
11035           (CmpInst::isTrueWhenEqual(Pred) || CmpInst::isFalseWhenEqual(Pred))) {
11036         Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
11037                                           CmpInst::isTrueWhenEqual(Pred));
11038         addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11039                  getAnchorScope());
11040         return true;
11041       }
11042 
11043       auto *TypedLHS = AA::getWithType(LHSV, *LHS->getType());
11044       auto *TypedRHS = AA::getWithType(RHSV, *RHS->getType());
11045       if (TypedLHS && TypedRHS) {
11046         Value *NewV = simplifyCmpInst(Pred, TypedLHS, TypedRHS, Q);
11047         if (NewV && NewV != &Cmp) {
11048           addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11049                    getAnchorScope());
11050           return true;
11051         }
11052       }
11053 
11054       // From now on we only handle equalities (==, !=).
11055       if (!CmpInst::isEquality(Pred))
11056         return false;
11057 
11058       bool LHSIsNull = isa<ConstantPointerNull>(LHSV);
11059       bool RHSIsNull = isa<ConstantPointerNull>(RHSV);
11060       if (!LHSIsNull && !RHSIsNull)
11061         return false;
11062 
11063       // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
11064       // non-nullptr operand and if we assume it's non-null we can conclude the
11065       // result of the comparison.
11066       assert((LHSIsNull || RHSIsNull) &&
11067              "Expected nullptr versus non-nullptr comparison at this point");
11068 
11069       // The index is the operand that we assume is not null.
11070       unsigned PtrIdx = LHSIsNull;
11071       bool IsKnownNonNull;
11072       bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
11073           A, this, IRPosition::value(*(PtrIdx ? &RHSV : &LHSV)),
11074           DepClassTy::REQUIRED, IsKnownNonNull);
11075       if (!IsAssumedNonNull)
11076         return false;
11077 
11078       // The new value depends on the predicate, true for != and false for ==.
11079       Constant *NewV =
11080           ConstantInt::get(Type::getInt1Ty(Ctx), Pred == CmpInst::ICMP_NE);
11081       addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11082                getAnchorScope());
11083       return true;
11084     };
11085 
11086     for (auto &LHSValue : LHSValues)
11087       for (auto &RHSValue : RHSValues)
11088         if (!CheckPair(*LHSValue.getValue(), *RHSValue.getValue()))
11089           return false;
11090     return true;
11091   }
11092 
11093   bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II,
11094                         SmallVectorImpl<ItemInfo> &Worklist) {
11095     const Instruction *CtxI = II.I.getCtxI();
11096     bool UsedAssumedInformation = false;
11097 
11098     std::optional<Constant *> C =
11099         A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
11100     bool NoValueYet = !C.has_value();
11101     if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
11102       return true;
11103     if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
11104       if (CI->isZero())
11105         Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
11106       else
11107         Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
11108     } else if (&SI == &getAssociatedValue()) {
11109       // We could not simplify the condition, assume both values.
11110       Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
11111       Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
11112     } else {
11113       std::optional<Value *> SimpleV = A.getAssumedSimplified(
11114           IRPosition::inst(SI), *this, UsedAssumedInformation, II.S);
11115       if (!SimpleV.has_value())
11116         return true;
11117       if (*SimpleV) {
11118         addValue(A, getState(), **SimpleV, CtxI, II.S, getAnchorScope());
11119         return true;
11120       }
11121       return false;
11122     }
11123     return true;
11124   }
11125 
11126   bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II,
11127                       SmallVectorImpl<ItemInfo> &Worklist) {
11128     SmallSetVector<Value *, 4> PotentialCopies;
11129     SmallSetVector<Instruction *, 4> PotentialValueOrigins;
11130     bool UsedAssumedInformation = false;
11131     if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
11132                                         PotentialValueOrigins, *this,
11133                                         UsedAssumedInformation,
11134                                         /* OnlyExact */ true)) {
11135       LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "
11136                            "loaded values for load instruction "
11137                         << LI << "\n");
11138       return false;
11139     }
11140 
11141     // Do not simplify loads that are only used in llvm.assume if we cannot also
11142     // remove all stores that may feed into the load. The reason is that the
11143     // assume is probably worth something as long as the stores are around.
11144     InformationCache &InfoCache = A.getInfoCache();
11145     if (InfoCache.isOnlyUsedByAssume(LI)) {
11146       if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
11147             if (!I || isa<AssumeInst>(I))
11148               return true;
11149             if (auto *SI = dyn_cast<StoreInst>(I))
11150               return A.isAssumedDead(SI->getOperandUse(0), this,
11151                                      /* LivenessAA */ nullptr,
11152                                      UsedAssumedInformation,
11153                                      /* CheckBBLivenessOnly */ false);
11154             return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
11155                                    UsedAssumedInformation,
11156                                    /* CheckBBLivenessOnly */ false);
11157           })) {
11158         LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is onl used by assumes "
11159                              "and we cannot delete all the stores: "
11160                           << LI << "\n");
11161         return false;
11162       }
11163     }
11164 
11165     // Values have to be dynamically unique or we loose the fact that a
11166     // single llvm::Value might represent two runtime values (e.g.,
11167     // stack locations in different recursive calls).
11168     const Instruction *CtxI = II.I.getCtxI();
11169     bool ScopeIsLocal = (II.S & AA::Intraprocedural);
11170     bool AllLocal = ScopeIsLocal;
11171     bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
11172       AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
11173       return AA::isDynamicallyUnique(A, *this, *PC);
11174     });
11175     if (!DynamicallyUnique) {
11176       LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "
11177                            "values are dynamically unique: "
11178                         << LI << "\n");
11179       return false;
11180     }
11181 
11182     for (auto *PotentialCopy : PotentialCopies) {
11183       if (AllLocal) {
11184         Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
11185       } else {
11186         Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
11187       }
11188     }
11189     if (!AllLocal && ScopeIsLocal)
11190       addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
11191     return true;
11192   }
11193 
11194   bool handlePHINode(
11195       Attributor &A, PHINode &PHI, ItemInfo II,
11196       SmallVectorImpl<ItemInfo> &Worklist,
11197       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
11198     auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
11199       LivenessInfo &LI = LivenessAAs[&F];
11200       if (!LI.LivenessAA)
11201         LI.LivenessAA = A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
11202                                              DepClassTy::NONE);
11203       return LI;
11204     };
11205 
11206     if (&PHI == &getAssociatedValue()) {
11207       LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
11208       const auto *CI =
11209           A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
11210               *PHI.getFunction());
11211 
11212       Cycle *C = nullptr;
11213       bool CyclePHI = mayBeInCycle(CI, &PHI, /* HeaderOnly */ true, &C);
11214       for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
11215         BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
11216         if (LI.LivenessAA &&
11217             LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
11218           LI.AnyDead = true;
11219           continue;
11220         }
11221         Value *V = PHI.getIncomingValue(u);
11222         if (V == &PHI)
11223           continue;
11224 
11225         // If the incoming value is not the PHI but an instruction in the same
11226         // cycle we might have multiple versions of it flying around.
11227         if (CyclePHI && isa<Instruction>(V) &&
11228             (!C || C->contains(cast<Instruction>(V)->getParent())))
11229           return false;
11230 
11231         Worklist.push_back({{*V, IncomingBB->getTerminator()}, II.S});
11232       }
11233       return true;
11234     }
11235 
11236     bool UsedAssumedInformation = false;
11237     std::optional<Value *> SimpleV = A.getAssumedSimplified(
11238         IRPosition::inst(PHI), *this, UsedAssumedInformation, II.S);
11239     if (!SimpleV.has_value())
11240       return true;
11241     if (!(*SimpleV))
11242       return false;
11243     addValue(A, getState(), **SimpleV, &PHI, II.S, getAnchorScope());
11244     return true;
11245   }
11246 
11247   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
11248   /// simplify any operand of the instruction \p I. Return true if successful,
11249   /// in that case Worklist will be updated.
11250   bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II,
11251                          SmallVectorImpl<ItemInfo> &Worklist) {
11252     bool SomeSimplified = false;
11253     bool UsedAssumedInformation = false;
11254 
11255     SmallVector<Value *, 8> NewOps(I.getNumOperands());
11256     int Idx = 0;
11257     for (Value *Op : I.operands()) {
11258       const auto &SimplifiedOp = A.getAssumedSimplified(
11259           IRPosition::value(*Op, getCallBaseContext()), *this,
11260           UsedAssumedInformation, AA::Intraprocedural);
11261       // If we are not sure about any operand we are not sure about the entire
11262       // instruction, we'll wait.
11263       if (!SimplifiedOp.has_value())
11264         return true;
11265 
11266       if (*SimplifiedOp)
11267         NewOps[Idx] = *SimplifiedOp;
11268       else
11269         NewOps[Idx] = Op;
11270 
11271       SomeSimplified |= (NewOps[Idx] != Op);
11272       ++Idx;
11273     }
11274 
11275     // We won't bother with the InstSimplify interface if we didn't simplify any
11276     // operand ourselves.
11277     if (!SomeSimplified)
11278       return false;
11279 
11280     InformationCache &InfoCache = A.getInfoCache();
11281     Function *F = I.getFunction();
11282     const auto *DT =
11283         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
11284     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
11285     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
11286 
11287     const DataLayout &DL = I.getModule()->getDataLayout();
11288     SimplifyQuery Q(DL, TLI, DT, AC, &I);
11289     Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q);
11290     if (!NewV || NewV == &I)
11291       return false;
11292 
11293     LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to "
11294                       << *NewV << "\n");
11295     Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S});
11296     return true;
11297   }
11298 
11299   bool simplifyInstruction(
11300       Attributor &A, Instruction &I, ItemInfo II,
11301       SmallVectorImpl<ItemInfo> &Worklist,
11302       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
11303     if (auto *CI = dyn_cast<CmpInst>(&I))
11304       return handleCmp(A, *CI, CI->getOperand(0), CI->getOperand(1),
11305                        CI->getPredicate(), II, Worklist);
11306 
11307     switch (I.getOpcode()) {
11308     case Instruction::Select:
11309       return handleSelectInst(A, cast<SelectInst>(I), II, Worklist);
11310     case Instruction::PHI:
11311       return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs);
11312     case Instruction::Load:
11313       return handleLoadInst(A, cast<LoadInst>(I), II, Worklist);
11314     default:
11315       return handleGenericInst(A, I, II, Worklist);
11316     };
11317     return false;
11318   }
11319 
11320   void genericValueTraversal(Attributor &A, Value *InitialV) {
11321     SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
11322 
11323     SmallSet<ItemInfo, 16> Visited;
11324     SmallVector<ItemInfo, 16> Worklist;
11325     Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope});
11326 
11327     int Iteration = 0;
11328     do {
11329       ItemInfo II = Worklist.pop_back_val();
11330       Value *V = II.I.getValue();
11331       assert(V);
11332       const Instruction *CtxI = II.I.getCtxI();
11333       AA::ValueScope S = II.S;
11334 
11335       // Check if we should process the current value. To prevent endless
11336       // recursion keep a record of the values we followed!
11337       if (!Visited.insert(II).second)
11338         continue;
11339 
11340       // Make sure we limit the compile time for complex expressions.
11341       if (Iteration++ >= MaxPotentialValuesIterations) {
11342         LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
11343                           << Iteration << "!\n");
11344         addValue(A, getState(), *V, CtxI, S, getAnchorScope());
11345         continue;
11346       }
11347 
11348       // Explicitly look through calls with a "returned" attribute if we do
11349       // not have a pointer as stripPointerCasts only works on them.
11350       Value *NewV = nullptr;
11351       if (V->getType()->isPointerTy()) {
11352         NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
11353       } else {
11354         if (auto *CB = dyn_cast<CallBase>(V))
11355           if (auto *Callee =
11356                   dyn_cast_if_present<Function>(CB->getCalledOperand())) {
11357             for (Argument &Arg : Callee->args())
11358               if (Arg.hasReturnedAttr()) {
11359                 NewV = CB->getArgOperand(Arg.getArgNo());
11360                 break;
11361               }
11362           }
11363       }
11364       if (NewV && NewV != V) {
11365         Worklist.push_back({{*NewV, CtxI}, S});
11366         continue;
11367       }
11368 
11369       if (auto *CE = dyn_cast<ConstantExpr>(V)) {
11370         if (CE->getOpcode() == Instruction::ICmp)
11371           if (handleCmp(A, *CE, CE->getOperand(0), CE->getOperand(1),
11372                         CmpInst::Predicate(CE->getPredicate()), II, Worklist))
11373             continue;
11374       }
11375 
11376       if (auto *I = dyn_cast<Instruction>(V)) {
11377         if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
11378           continue;
11379       }
11380 
11381       if (V != InitialV || isa<Argument>(V))
11382         if (recurseForValue(A, IRPosition::value(*V), II.S))
11383           continue;
11384 
11385       // If we haven't stripped anything we give up.
11386       if (V == InitialV && CtxI == getCtxI()) {
11387         indicatePessimisticFixpoint();
11388         return;
11389       }
11390 
11391       addValue(A, getState(), *V, CtxI, S, getAnchorScope());
11392     } while (!Worklist.empty());
11393 
11394     // If we actually used liveness information so we have to record a
11395     // dependence.
11396     for (auto &It : LivenessAAs)
11397       if (It.second.AnyDead)
11398         A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
11399   }
11400 
11401   /// See AbstractAttribute::trackStatistics()
11402   void trackStatistics() const override {
11403     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
11404   }
11405 };
11406 
11407 struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
11408   using Base = AAPotentialValuesImpl;
11409   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
11410       : Base(IRP, A) {}
11411 
11412   /// See AbstractAttribute::initialize(..).
11413   void initialize(Attributor &A) override {
11414     auto &Arg = cast<Argument>(getAssociatedValue());
11415     if (Arg.hasPointeeInMemoryValueAttr())
11416       indicatePessimisticFixpoint();
11417   }
11418 
11419   /// See AbstractAttribute::updateImpl(...).
11420   ChangeStatus updateImpl(Attributor &A) override {
11421     auto AssumedBefore = getAssumed();
11422 
11423     unsigned ArgNo = getCalleeArgNo();
11424 
11425     bool UsedAssumedInformation = false;
11426     SmallVector<AA::ValueAndContext> Values;
11427     auto CallSitePred = [&](AbstractCallSite ACS) {
11428       const auto CSArgIRP = IRPosition::callsite_argument(ACS, ArgNo);
11429       if (CSArgIRP.getPositionKind() == IRP_INVALID)
11430         return false;
11431 
11432       if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
11433                                         AA::Interprocedural,
11434                                         UsedAssumedInformation))
11435         return false;
11436 
11437       return isValidState();
11438     };
11439 
11440     if (!A.checkForAllCallSites(CallSitePred, *this,
11441                                 /* RequireAllCallSites */ true,
11442                                 UsedAssumedInformation))
11443       return indicatePessimisticFixpoint();
11444 
11445     Function *Fn = getAssociatedFunction();
11446     bool AnyNonLocal = false;
11447     for (auto &It : Values) {
11448       if (isa<Constant>(It.getValue())) {
11449         addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
11450                  getAnchorScope());
11451         continue;
11452       }
11453       if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
11454         return indicatePessimisticFixpoint();
11455 
11456       if (auto *Arg = dyn_cast<Argument>(It.getValue()))
11457         if (Arg->getParent() == Fn) {
11458           addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
11459                    getAnchorScope());
11460           continue;
11461         }
11462       addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural,
11463                getAnchorScope());
11464       AnyNonLocal = true;
11465     }
11466     assert(!undefIsContained() && "Undef should be an explicit value!");
11467     if (AnyNonLocal)
11468       giveUpOnIntraprocedural(A);
11469 
11470     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11471                                            : ChangeStatus::CHANGED;
11472   }
11473 
11474   /// See AbstractAttribute::trackStatistics()
11475   void trackStatistics() const override {
11476     STATS_DECLTRACK_ARG_ATTR(potential_values)
11477   }
11478 };
11479 
11480 struct AAPotentialValuesReturned : public AAPotentialValuesFloating {
11481   using Base = AAPotentialValuesFloating;
11482   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
11483       : Base(IRP, A) {}
11484 
11485   /// See AbstractAttribute::initialize(..).
11486   void initialize(Attributor &A) override {
11487     Function *F = getAssociatedFunction();
11488     if (!F || F->isDeclaration() || F->getReturnType()->isVoidTy()) {
11489       indicatePessimisticFixpoint();
11490       return;
11491     }
11492 
11493     for (Argument &Arg : F->args())
11494       if (Arg.hasReturnedAttr()) {
11495         addValue(A, getState(), Arg, nullptr, AA::AnyScope, F);
11496         ReturnedArg = &Arg;
11497         break;
11498       }
11499     if (!A.isFunctionIPOAmendable(*F) ||
11500         A.hasSimplificationCallback(getIRPosition())) {
11501       if (!ReturnedArg)
11502         indicatePessimisticFixpoint();
11503       else
11504         indicateOptimisticFixpoint();
11505     }
11506   }
11507 
11508   /// See AbstractAttribute::updateImpl(...).
11509   ChangeStatus updateImpl(Attributor &A) override {
11510     auto AssumedBefore = getAssumed();
11511     bool UsedAssumedInformation = false;
11512 
11513     SmallVector<AA::ValueAndContext> Values;
11514     Function *AnchorScope = getAnchorScope();
11515     auto HandleReturnedValue = [&](Value &V, Instruction *CtxI,
11516                                    bool AddValues) {
11517       for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
11518         Values.clear();
11519         if (!A.getAssumedSimplifiedValues(IRPosition::value(V), this, Values, S,
11520                                           UsedAssumedInformation,
11521                                           /* RecurseForSelectAndPHI */ true))
11522           return false;
11523         if (!AddValues)
11524           continue;
11525         for (const AA::ValueAndContext &VAC : Values)
11526           addValue(A, getState(), *VAC.getValue(),
11527                    VAC.getCtxI() ? VAC.getCtxI() : CtxI, S, AnchorScope);
11528       }
11529       return true;
11530     };
11531 
11532     if (ReturnedArg) {
11533       HandleReturnedValue(*ReturnedArg, nullptr, true);
11534     } else {
11535       auto RetInstPred = [&](Instruction &RetI) {
11536         bool AddValues = true;
11537         if (isa<PHINode>(RetI.getOperand(0)) ||
11538             isa<SelectInst>(RetI.getOperand(0))) {
11539           addValue(A, getState(), *RetI.getOperand(0), &RetI, AA::AnyScope,
11540                    AnchorScope);
11541           AddValues = false;
11542         }
11543         return HandleReturnedValue(*RetI.getOperand(0), &RetI, AddValues);
11544       };
11545 
11546       if (!A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
11547                                      UsedAssumedInformation,
11548                                      /* CheckBBLivenessOnly */ true))
11549         return indicatePessimisticFixpoint();
11550     }
11551 
11552     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11553                                            : ChangeStatus::CHANGED;
11554   }
11555 
11556   void addValue(Attributor &A, StateType &State, Value &V,
11557                 const Instruction *CtxI, AA::ValueScope S,
11558                 Function *AnchorScope) const override {
11559     Function *F = getAssociatedFunction();
11560     if (auto *CB = dyn_cast<CallBase>(&V))
11561       if (CB->getCalledOperand() == F)
11562         return;
11563     Base::addValue(A, State, V, CtxI, S, AnchorScope);
11564   }
11565 
11566   ChangeStatus manifest(Attributor &A) override {
11567     if (ReturnedArg)
11568       return ChangeStatus::UNCHANGED;
11569     SmallVector<AA::ValueAndContext> Values;
11570     if (!getAssumedSimplifiedValues(A, Values, AA::ValueScope::Intraprocedural,
11571                                     /* RecurseForSelectAndPHI */ true))
11572       return ChangeStatus::UNCHANGED;
11573     Value *NewVal = getSingleValue(A, *this, getIRPosition(), Values);
11574     if (!NewVal)
11575       return ChangeStatus::UNCHANGED;
11576 
11577     ChangeStatus Changed = ChangeStatus::UNCHANGED;
11578     if (auto *Arg = dyn_cast<Argument>(NewVal)) {
11579       STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
11580                       "Number of function with unique return");
11581       Changed |= A.manifestAttrs(
11582           IRPosition::argument(*Arg),
11583           {Attribute::get(Arg->getContext(), Attribute::Returned)});
11584       STATS_DECLTRACK_ARG_ATTR(returned);
11585     }
11586 
11587     auto RetInstPred = [&](Instruction &RetI) {
11588       Value *RetOp = RetI.getOperand(0);
11589       if (isa<UndefValue>(RetOp) || RetOp == NewVal)
11590         return true;
11591       if (AA::isValidAtPosition({*NewVal, RetI}, A.getInfoCache()))
11592         if (A.changeUseAfterManifest(RetI.getOperandUse(0), *NewVal))
11593           Changed = ChangeStatus::CHANGED;
11594       return true;
11595     };
11596     bool UsedAssumedInformation = false;
11597     (void)A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
11598                                     UsedAssumedInformation,
11599                                     /* CheckBBLivenessOnly */ true);
11600     return Changed;
11601   }
11602 
11603   ChangeStatus indicatePessimisticFixpoint() override {
11604     return AAPotentialValues::indicatePessimisticFixpoint();
11605   }
11606 
11607   /// See AbstractAttribute::trackStatistics()
11608   void trackStatistics() const override{
11609       STATS_DECLTRACK_FNRET_ATTR(potential_values)}
11610 
11611   /// The argumented with an existing `returned` attribute.
11612   Argument *ReturnedArg = nullptr;
11613 };
11614 
11615 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
11616   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
11617       : AAPotentialValuesImpl(IRP, A) {}
11618 
11619   /// See AbstractAttribute::updateImpl(...).
11620   ChangeStatus updateImpl(Attributor &A) override {
11621     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
11622                      "not be called");
11623   }
11624 
11625   /// See AbstractAttribute::trackStatistics()
11626   void trackStatistics() const override {
11627     STATS_DECLTRACK_FN_ATTR(potential_values)
11628   }
11629 };
11630 
11631 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
11632   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
11633       : AAPotentialValuesFunction(IRP, A) {}
11634 
11635   /// See AbstractAttribute::trackStatistics()
11636   void trackStatistics() const override {
11637     STATS_DECLTRACK_CS_ATTR(potential_values)
11638   }
11639 };
11640 
11641 struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl {
11642   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
11643       : AAPotentialValuesImpl(IRP, A) {}
11644 
11645   /// See AbstractAttribute::updateImpl(...).
11646   ChangeStatus updateImpl(Attributor &A) override {
11647     auto AssumedBefore = getAssumed();
11648 
11649     Function *Callee = getAssociatedFunction();
11650     if (!Callee)
11651       return indicatePessimisticFixpoint();
11652 
11653     bool UsedAssumedInformation = false;
11654     auto *CB = cast<CallBase>(getCtxI());
11655     if (CB->isMustTailCall() &&
11656         !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr,
11657                          UsedAssumedInformation))
11658       return indicatePessimisticFixpoint();
11659 
11660     SmallVector<AA::ValueAndContext> Values;
11661     if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
11662                                       Values, AA::Intraprocedural,
11663                                       UsedAssumedInformation))
11664       return indicatePessimisticFixpoint();
11665 
11666     Function *Caller = CB->getCaller();
11667 
11668     bool AnyNonLocal = false;
11669     for (auto &It : Values) {
11670       Value *V = It.getValue();
11671       std::optional<Value *> CallerV = A.translateArgumentToCallSiteContent(
11672           V, *CB, *this, UsedAssumedInformation);
11673       if (!CallerV.has_value()) {
11674         // Nothing to do as long as no value was determined.
11675         continue;
11676       }
11677       V = *CallerV ? *CallerV : V;
11678       if (AA::isDynamicallyUnique(A, *this, *V) &&
11679           AA::isValidInScope(*V, Caller)) {
11680         if (*CallerV) {
11681           SmallVector<AA::ValueAndContext> ArgValues;
11682           IRPosition IRP = IRPosition::value(*V);
11683           if (auto *Arg = dyn_cast<Argument>(V))
11684             if (Arg->getParent() == CB->getCalledOperand())
11685               IRP = IRPosition::callsite_argument(*CB, Arg->getArgNo());
11686           if (recurseForValue(A, IRP, AA::AnyScope))
11687             continue;
11688         }
11689         addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
11690       } else {
11691         AnyNonLocal = true;
11692         break;
11693       }
11694     }
11695     if (AnyNonLocal) {
11696       Values.clear();
11697       if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
11698                                         Values, AA::Interprocedural,
11699                                         UsedAssumedInformation))
11700         return indicatePessimisticFixpoint();
11701       AnyNonLocal = false;
11702       getState() = PotentialLLVMValuesState::getBestState();
11703       for (auto &It : Values) {
11704         Value *V = It.getValue();
11705         if (!AA::isDynamicallyUnique(A, *this, *V))
11706           return indicatePessimisticFixpoint();
11707         if (AA::isValidInScope(*V, Caller)) {
11708           addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
11709         } else {
11710           AnyNonLocal = true;
11711           addValue(A, getState(), *V, CB, AA::Interprocedural,
11712                    getAnchorScope());
11713         }
11714       }
11715       if (AnyNonLocal)
11716         giveUpOnIntraprocedural(A);
11717     }
11718     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11719                                            : ChangeStatus::CHANGED;
11720   }
11721 
11722   ChangeStatus indicatePessimisticFixpoint() override {
11723     return AAPotentialValues::indicatePessimisticFixpoint();
11724   }
11725 
11726   /// See AbstractAttribute::trackStatistics()
11727   void trackStatistics() const override {
11728     STATS_DECLTRACK_CSRET_ATTR(potential_values)
11729   }
11730 };
11731 
11732 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
11733   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
11734       : AAPotentialValuesFloating(IRP, A) {}
11735 
11736   /// See AbstractAttribute::trackStatistics()
11737   void trackStatistics() const override {
11738     STATS_DECLTRACK_CSARG_ATTR(potential_values)
11739   }
11740 };
11741 } // namespace
11742 
11743 /// ---------------------- Assumption Propagation ------------------------------
11744 namespace {
11745 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
11746   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
11747                        const DenseSet<StringRef> &Known)
11748       : AAAssumptionInfo(IRP, A, Known) {}
11749 
11750   /// See AbstractAttribute::manifest(...).
11751   ChangeStatus manifest(Attributor &A) override {
11752     // Don't manifest a universal set if it somehow made it here.
11753     if (getKnown().isUniversal())
11754       return ChangeStatus::UNCHANGED;
11755 
11756     const IRPosition &IRP = getIRPosition();
11757     return A.manifestAttrs(
11758         IRP,
11759         Attribute::get(IRP.getAnchorValue().getContext(), AssumptionAttrKey,
11760                        llvm::join(getAssumed().getSet(), ",")),
11761         /* ForceReplace */ true);
11762   }
11763 
11764   bool hasAssumption(const StringRef Assumption) const override {
11765     return isValidState() && setContains(Assumption);
11766   }
11767 
11768   /// See AbstractAttribute::getAsStr()
11769   const std::string getAsStr(Attributor *A) const override {
11770     const SetContents &Known = getKnown();
11771     const SetContents &Assumed = getAssumed();
11772 
11773     const std::string KnownStr =
11774         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
11775     const std::string AssumedStr =
11776         (Assumed.isUniversal())
11777             ? "Universal"
11778             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
11779 
11780     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
11781   }
11782 };
11783 
11784 /// Propagates assumption information from parent functions to all of their
11785 /// successors. An assumption can be propagated if the containing function
11786 /// dominates the called function.
11787 ///
11788 /// We start with a "known" set of assumptions already valid for the associated
11789 /// function and an "assumed" set that initially contains all possible
11790 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
11791 /// contents as concrete values are known. The concrete values are seeded by the
11792 /// first nodes that are either entries into the call graph, or contains no
11793 /// assumptions. Each node is updated as the intersection of the assumed state
11794 /// with all of its predecessors.
11795 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
11796   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
11797       : AAAssumptionInfoImpl(IRP, A,
11798                              getAssumptions(*IRP.getAssociatedFunction())) {}
11799 
11800   /// See AbstractAttribute::updateImpl(...).
11801   ChangeStatus updateImpl(Attributor &A) override {
11802     bool Changed = false;
11803 
11804     auto CallSitePred = [&](AbstractCallSite ACS) {
11805       const auto *AssumptionAA = A.getAAFor<AAAssumptionInfo>(
11806           *this, IRPosition::callsite_function(*ACS.getInstruction()),
11807           DepClassTy::REQUIRED);
11808       if (!AssumptionAA)
11809         return false;
11810       // Get the set of assumptions shared by all of this function's callers.
11811       Changed |= getIntersection(AssumptionAA->getAssumed());
11812       return !getAssumed().empty() || !getKnown().empty();
11813     };
11814 
11815     bool UsedAssumedInformation = false;
11816     // Get the intersection of all assumptions held by this node's predecessors.
11817     // If we don't know all the call sites then this is either an entry into the
11818     // call graph or an empty node. This node is known to only contain its own
11819     // assumptions and can be propagated to its successors.
11820     if (!A.checkForAllCallSites(CallSitePred, *this, true,
11821                                 UsedAssumedInformation))
11822       return indicatePessimisticFixpoint();
11823 
11824     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11825   }
11826 
11827   void trackStatistics() const override {}
11828 };
11829 
11830 /// Assumption Info defined for call sites.
11831 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
11832 
11833   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
11834       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
11835 
11836   /// See AbstractAttribute::initialize(...).
11837   void initialize(Attributor &A) override {
11838     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
11839     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
11840   }
11841 
11842   /// See AbstractAttribute::updateImpl(...).
11843   ChangeStatus updateImpl(Attributor &A) override {
11844     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
11845     auto *AssumptionAA =
11846         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
11847     if (!AssumptionAA)
11848       return indicatePessimisticFixpoint();
11849     bool Changed = getIntersection(AssumptionAA->getAssumed());
11850     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11851   }
11852 
11853   /// See AbstractAttribute::trackStatistics()
11854   void trackStatistics() const override {}
11855 
11856 private:
11857   /// Helper to initialized the known set as all the assumptions this call and
11858   /// the callee contain.
11859   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
11860     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
11861     auto Assumptions = getAssumptions(CB);
11862     if (const Function *F = CB.getCaller())
11863       set_union(Assumptions, getAssumptions(*F));
11864     if (Function *F = IRP.getAssociatedFunction())
11865       set_union(Assumptions, getAssumptions(*F));
11866     return Assumptions;
11867   }
11868 };
11869 } // namespace
11870 
11871 AACallGraphNode *AACallEdgeIterator::operator*() const {
11872   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
11873       A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
11874 }
11875 
11876 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
11877 
11878 /// ------------------------ UnderlyingObjects ---------------------------------
11879 
11880 namespace {
11881 struct AAUnderlyingObjectsImpl
11882     : StateWrapper<BooleanState, AAUnderlyingObjects> {
11883   using BaseTy = StateWrapper<BooleanState, AAUnderlyingObjects>;
11884   AAUnderlyingObjectsImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
11885 
11886   /// See AbstractAttribute::getAsStr().
11887   const std::string getAsStr(Attributor *A) const override {
11888     return std::string("UnderlyingObjects ") +
11889            (isValidState()
11890                 ? (std::string("inter #") +
11891                    std::to_string(InterAssumedUnderlyingObjects.size()) +
11892                    " objs" + std::string(", intra #") +
11893                    std::to_string(IntraAssumedUnderlyingObjects.size()) +
11894                    " objs")
11895                 : "<invalid>");
11896   }
11897 
11898   /// See AbstractAttribute::trackStatistics()
11899   void trackStatistics() const override {}
11900 
11901   /// See AbstractAttribute::updateImpl(...).
11902   ChangeStatus updateImpl(Attributor &A) override {
11903     auto &Ptr = getAssociatedValue();
11904 
11905     auto DoUpdate = [&](SmallSetVector<Value *, 8> &UnderlyingObjects,
11906                         AA::ValueScope Scope) {
11907       bool UsedAssumedInformation = false;
11908       SmallPtrSet<Value *, 8> SeenObjects;
11909       SmallVector<AA::ValueAndContext> Values;
11910 
11911       if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), *this, Values,
11912                                         Scope, UsedAssumedInformation))
11913         return UnderlyingObjects.insert(&Ptr);
11914 
11915       bool Changed = false;
11916 
11917       for (unsigned I = 0; I < Values.size(); ++I) {
11918         auto &VAC = Values[I];
11919         auto *Obj = VAC.getValue();
11920         Value *UO = getUnderlyingObject(Obj);
11921         if (UO && UO != VAC.getValue() && SeenObjects.insert(UO).second) {
11922           const auto *OtherAA = A.getAAFor<AAUnderlyingObjects>(
11923               *this, IRPosition::value(*UO), DepClassTy::OPTIONAL);
11924           auto Pred = [&Values](Value &V) {
11925             Values.emplace_back(V, nullptr);
11926             return true;
11927           };
11928 
11929           if (!OtherAA || !OtherAA->forallUnderlyingObjects(Pred, Scope))
11930             llvm_unreachable(
11931                 "The forall call should not return false at this position");
11932 
11933           continue;
11934         }
11935 
11936         if (isa<SelectInst>(Obj)) {
11937           Changed |= handleIndirect(A, *Obj, UnderlyingObjects, Scope);
11938           continue;
11939         }
11940         if (auto *PHI = dyn_cast<PHINode>(Obj)) {
11941           // Explicitly look through PHIs as we do not care about dynamically
11942           // uniqueness.
11943           for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
11944             Changed |= handleIndirect(A, *PHI->getIncomingValue(u),
11945                                       UnderlyingObjects, Scope);
11946           }
11947           continue;
11948         }
11949 
11950         Changed |= UnderlyingObjects.insert(Obj);
11951       }
11952 
11953       return Changed;
11954     };
11955 
11956     bool Changed = false;
11957     Changed |= DoUpdate(IntraAssumedUnderlyingObjects, AA::Intraprocedural);
11958     Changed |= DoUpdate(InterAssumedUnderlyingObjects, AA::Interprocedural);
11959 
11960     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11961   }
11962 
11963   bool forallUnderlyingObjects(
11964       function_ref<bool(Value &)> Pred,
11965       AA::ValueScope Scope = AA::Interprocedural) const override {
11966     if (!isValidState())
11967       return Pred(getAssociatedValue());
11968 
11969     auto &AssumedUnderlyingObjects = Scope == AA::Intraprocedural
11970                                          ? IntraAssumedUnderlyingObjects
11971                                          : InterAssumedUnderlyingObjects;
11972     for (Value *Obj : AssumedUnderlyingObjects)
11973       if (!Pred(*Obj))
11974         return false;
11975 
11976     return true;
11977   }
11978 
11979 private:
11980   /// Handle the case where the value is not the actual underlying value, such
11981   /// as a phi node or a select instruction.
11982   bool handleIndirect(Attributor &A, Value &V,
11983                       SmallSetVector<Value *, 8> &UnderlyingObjects,
11984                       AA::ValueScope Scope) {
11985     bool Changed = false;
11986     const auto *AA = A.getAAFor<AAUnderlyingObjects>(
11987         *this, IRPosition::value(V), DepClassTy::OPTIONAL);
11988     auto Pred = [&](Value &V) {
11989       Changed |= UnderlyingObjects.insert(&V);
11990       return true;
11991     };
11992     if (!AA || !AA->forallUnderlyingObjects(Pred, Scope))
11993       llvm_unreachable(
11994           "The forall call should not return false at this position");
11995     return Changed;
11996   }
11997 
11998   /// All the underlying objects collected so far via intra procedural scope.
11999   SmallSetVector<Value *, 8> IntraAssumedUnderlyingObjects;
12000   /// All the underlying objects collected so far via inter procedural scope.
12001   SmallSetVector<Value *, 8> InterAssumedUnderlyingObjects;
12002 };
12003 
12004 struct AAUnderlyingObjectsFloating final : AAUnderlyingObjectsImpl {
12005   AAUnderlyingObjectsFloating(const IRPosition &IRP, Attributor &A)
12006       : AAUnderlyingObjectsImpl(IRP, A) {}
12007 };
12008 
12009 struct AAUnderlyingObjectsArgument final : AAUnderlyingObjectsImpl {
12010   AAUnderlyingObjectsArgument(const IRPosition &IRP, Attributor &A)
12011       : AAUnderlyingObjectsImpl(IRP, A) {}
12012 };
12013 
12014 struct AAUnderlyingObjectsCallSite final : AAUnderlyingObjectsImpl {
12015   AAUnderlyingObjectsCallSite(const IRPosition &IRP, Attributor &A)
12016       : AAUnderlyingObjectsImpl(IRP, A) {}
12017 };
12018 
12019 struct AAUnderlyingObjectsCallSiteArgument final : AAUnderlyingObjectsImpl {
12020   AAUnderlyingObjectsCallSiteArgument(const IRPosition &IRP, Attributor &A)
12021       : AAUnderlyingObjectsImpl(IRP, A) {}
12022 };
12023 
12024 struct AAUnderlyingObjectsReturned final : AAUnderlyingObjectsImpl {
12025   AAUnderlyingObjectsReturned(const IRPosition &IRP, Attributor &A)
12026       : AAUnderlyingObjectsImpl(IRP, A) {}
12027 };
12028 
12029 struct AAUnderlyingObjectsCallSiteReturned final : AAUnderlyingObjectsImpl {
12030   AAUnderlyingObjectsCallSiteReturned(const IRPosition &IRP, Attributor &A)
12031       : AAUnderlyingObjectsImpl(IRP, A) {}
12032 };
12033 
12034 struct AAUnderlyingObjectsFunction final : AAUnderlyingObjectsImpl {
12035   AAUnderlyingObjectsFunction(const IRPosition &IRP, Attributor &A)
12036       : AAUnderlyingObjectsImpl(IRP, A) {}
12037 };
12038 } // namespace
12039 
12040 /// ------------------------ Global Value Info  -------------------------------
12041 namespace {
12042 struct AAGlobalValueInfoFloating : public AAGlobalValueInfo {
12043   AAGlobalValueInfoFloating(const IRPosition &IRP, Attributor &A)
12044       : AAGlobalValueInfo(IRP, A) {}
12045 
12046   /// See AbstractAttribute::initialize(...).
12047   void initialize(Attributor &A) override {}
12048 
12049   bool checkUse(Attributor &A, const Use &U, bool &Follow,
12050                 SmallVectorImpl<const Value *> &Worklist) {
12051     Instruction *UInst = dyn_cast<Instruction>(U.getUser());
12052     if (!UInst) {
12053       Follow = true;
12054       return true;
12055     }
12056 
12057     LLVM_DEBUG(dbgs() << "[AAGlobalValueInfo] Check use: " << *U.get() << " in "
12058                       << *UInst << "\n");
12059 
12060     if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
12061       int Idx = &Cmp->getOperandUse(0) == &U;
12062       if (isa<Constant>(Cmp->getOperand(Idx)))
12063         return true;
12064       return U == &getAnchorValue();
12065     }
12066 
12067     // Explicitly catch return instructions.
12068     if (isa<ReturnInst>(UInst)) {
12069       auto CallSitePred = [&](AbstractCallSite ACS) {
12070         Worklist.push_back(ACS.getInstruction());
12071         return true;
12072       };
12073       bool UsedAssumedInformation = false;
12074       // TODO: We should traverse the uses or add a "non-call-site" CB.
12075       if (!A.checkForAllCallSites(CallSitePred, *UInst->getFunction(),
12076                                   /*RequireAllCallSites=*/true, this,
12077                                   UsedAssumedInformation))
12078         return false;
12079       return true;
12080     }
12081 
12082     // For now we only use special logic for call sites. However, the tracker
12083     // itself knows about a lot of other non-capturing cases already.
12084     auto *CB = dyn_cast<CallBase>(UInst);
12085     if (!CB)
12086       return false;
12087     // Direct calls are OK uses.
12088     if (CB->isCallee(&U))
12089       return true;
12090     // Non-argument uses are scary.
12091     if (!CB->isArgOperand(&U))
12092       return false;
12093     // TODO: Iterate callees.
12094     auto *Fn = dyn_cast<Function>(CB->getCalledOperand());
12095     if (!Fn || !A.isFunctionIPOAmendable(*Fn))
12096       return false;
12097 
12098     unsigned ArgNo = CB->getArgOperandNo(&U);
12099     Worklist.push_back(Fn->getArg(ArgNo));
12100     return true;
12101   }
12102 
12103   ChangeStatus updateImpl(Attributor &A) override {
12104     unsigned NumUsesBefore = Uses.size();
12105 
12106     SmallPtrSet<const Value *, 8> Visited;
12107     SmallVector<const Value *> Worklist;
12108     Worklist.push_back(&getAnchorValue());
12109 
12110     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
12111       Uses.insert(&U);
12112       switch (DetermineUseCaptureKind(U, nullptr)) {
12113       case UseCaptureKind::NO_CAPTURE:
12114         return checkUse(A, U, Follow, Worklist);
12115       case UseCaptureKind::MAY_CAPTURE:
12116         return checkUse(A, U, Follow, Worklist);
12117       case UseCaptureKind::PASSTHROUGH:
12118         Follow = true;
12119         return true;
12120       }
12121       return true;
12122     };
12123     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
12124       Uses.insert(&OldU);
12125       return true;
12126     };
12127 
12128     while (!Worklist.empty()) {
12129       const Value *V = Worklist.pop_back_val();
12130       if (!Visited.insert(V).second)
12131         continue;
12132       if (!A.checkForAllUses(UsePred, *this, *V,
12133                              /* CheckBBLivenessOnly */ true,
12134                              DepClassTy::OPTIONAL,
12135                              /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
12136         return indicatePessimisticFixpoint();
12137       }
12138     }
12139 
12140     return Uses.size() == NumUsesBefore ? ChangeStatus::UNCHANGED
12141                                         : ChangeStatus::CHANGED;
12142   }
12143 
12144   bool isPotentialUse(const Use &U) const override {
12145     return !isValidState() || Uses.contains(&U);
12146   }
12147 
12148   /// See AbstractAttribute::manifest(...).
12149   ChangeStatus manifest(Attributor &A) override {
12150     return ChangeStatus::UNCHANGED;
12151   }
12152 
12153   /// See AbstractAttribute::getAsStr().
12154   const std::string getAsStr(Attributor *A) const override {
12155     return "[" + std::to_string(Uses.size()) + " uses]";
12156   }
12157 
12158   void trackStatistics() const override {
12159     STATS_DECLTRACK_FLOATING_ATTR(GlobalValuesTracked);
12160   }
12161 
12162 private:
12163   /// Set of (transitive) uses of this GlobalValue.
12164   SmallPtrSet<const Use *, 8> Uses;
12165 };
12166 } // namespace
12167 
12168 /// ------------------------ Indirect Call Info  -------------------------------
12169 namespace {
12170 struct AAIndirectCallInfoCallSite : public AAIndirectCallInfo {
12171   AAIndirectCallInfoCallSite(const IRPosition &IRP, Attributor &A)
12172       : AAIndirectCallInfo(IRP, A) {}
12173 
12174   /// See AbstractAttribute::initialize(...).
12175   void initialize(Attributor &A) override {
12176     auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees);
12177     if (!MD && !A.isClosedWorldModule())
12178       return;
12179 
12180     if (MD) {
12181       for (const auto &Op : MD->operands())
12182         if (Function *Callee = mdconst::dyn_extract_or_null<Function>(Op))
12183           PotentialCallees.insert(Callee);
12184     } else if (A.isClosedWorldModule()) {
12185       ArrayRef<Function *> IndirectlyCallableFunctions =
12186           A.getInfoCache().getIndirectlyCallableFunctions(A);
12187       PotentialCallees.insert(IndirectlyCallableFunctions.begin(),
12188                               IndirectlyCallableFunctions.end());
12189     }
12190 
12191     if (PotentialCallees.empty())
12192       indicateOptimisticFixpoint();
12193   }
12194 
12195   ChangeStatus updateImpl(Attributor &A) override {
12196     CallBase *CB = cast<CallBase>(getCtxI());
12197     const Use &CalleeUse = CB->getCalledOperandUse();
12198     Value *FP = CB->getCalledOperand();
12199 
12200     SmallSetVector<Function *, 4> AssumedCalleesNow;
12201     bool AllCalleesKnownNow = AllCalleesKnown;
12202 
12203     auto CheckPotentialCalleeUse = [&](Function &PotentialCallee,
12204                                        bool &UsedAssumedInformation) {
12205       const auto *GIAA = A.getAAFor<AAGlobalValueInfo>(
12206           *this, IRPosition::value(PotentialCallee), DepClassTy::OPTIONAL);
12207       if (!GIAA || GIAA->isPotentialUse(CalleeUse))
12208         return true;
12209       UsedAssumedInformation = !GIAA->isAtFixpoint();
12210       return false;
12211     };
12212 
12213     auto AddPotentialCallees = [&]() {
12214       for (auto *PotentialCallee : PotentialCallees) {
12215         bool UsedAssumedInformation = false;
12216         if (CheckPotentialCalleeUse(*PotentialCallee, UsedAssumedInformation))
12217           AssumedCalleesNow.insert(PotentialCallee);
12218       }
12219     };
12220 
12221     // Use simplification to find potential callees, if !callees was present,
12222     // fallback to that set if necessary.
12223     bool UsedAssumedInformation = false;
12224     SmallVector<AA::ValueAndContext> Values;
12225     if (!A.getAssumedSimplifiedValues(IRPosition::value(*FP), this, Values,
12226                                       AA::ValueScope::AnyScope,
12227                                       UsedAssumedInformation)) {
12228       if (PotentialCallees.empty())
12229         return indicatePessimisticFixpoint();
12230       AddPotentialCallees();
12231     }
12232 
12233     // Try to find a reason for \p Fn not to be a potential callee. If none was
12234     // found, add it to the assumed callees set.
12235     auto CheckPotentialCallee = [&](Function &Fn) {
12236       if (!PotentialCallees.empty() && !PotentialCallees.count(&Fn))
12237         return false;
12238 
12239       auto &CachedResult = FilterResults[&Fn];
12240       if (CachedResult.has_value())
12241         return CachedResult.value();
12242 
12243       bool UsedAssumedInformation = false;
12244       if (!CheckPotentialCalleeUse(Fn, UsedAssumedInformation)) {
12245         if (!UsedAssumedInformation)
12246           CachedResult = false;
12247         return false;
12248       }
12249 
12250       int NumFnArgs = Fn.arg_size();
12251       int NumCBArgs = CB->arg_size();
12252 
12253       // Check if any excess argument (which we fill up with poison) is known to
12254       // be UB on undef.
12255       for (int I = NumCBArgs; I < NumFnArgs; ++I) {
12256         bool IsKnown = false;
12257         if (AA::hasAssumedIRAttr<Attribute::NoUndef>(
12258                 A, this, IRPosition::argument(*Fn.getArg(I)),
12259                 DepClassTy::OPTIONAL, IsKnown)) {
12260           if (IsKnown)
12261             CachedResult = false;
12262           return false;
12263         }
12264       }
12265 
12266       CachedResult = true;
12267       return true;
12268     };
12269 
12270     // Check simplification result, prune known UB callees, also restrict it to
12271     // the !callees set, if present.
12272     for (auto &VAC : Values) {
12273       if (isa<UndefValue>(VAC.getValue()))
12274         continue;
12275       if (isa<ConstantPointerNull>(VAC.getValue()) &&
12276           VAC.getValue()->getType()->getPointerAddressSpace() == 0)
12277         continue;
12278       // TODO: Check for known UB, e.g., poison + noundef.
12279       if (auto *VACFn = dyn_cast<Function>(VAC.getValue())) {
12280         if (CheckPotentialCallee(*VACFn))
12281           AssumedCalleesNow.insert(VACFn);
12282         continue;
12283       }
12284       if (!PotentialCallees.empty()) {
12285         AddPotentialCallees();
12286         break;
12287       }
12288       AllCalleesKnownNow = false;
12289     }
12290 
12291     if (AssumedCalleesNow == AssumedCallees &&
12292         AllCalleesKnown == AllCalleesKnownNow)
12293       return ChangeStatus::UNCHANGED;
12294 
12295     std::swap(AssumedCallees, AssumedCalleesNow);
12296     AllCalleesKnown = AllCalleesKnownNow;
12297     return ChangeStatus::CHANGED;
12298   }
12299 
12300   /// See AbstractAttribute::manifest(...).
12301   ChangeStatus manifest(Attributor &A) override {
12302     // If we can't specialize at all, give up now.
12303     if (!AllCalleesKnown && AssumedCallees.empty())
12304       return ChangeStatus::UNCHANGED;
12305 
12306     CallBase *CB = cast<CallBase>(getCtxI());
12307     bool UsedAssumedInformation = false;
12308     if (A.isAssumedDead(*CB, this, /*LivenessAA=*/nullptr,
12309                         UsedAssumedInformation))
12310       return ChangeStatus::UNCHANGED;
12311 
12312     ChangeStatus Changed = ChangeStatus::UNCHANGED;
12313     Value *FP = CB->getCalledOperand();
12314     if (FP->getType()->getPointerAddressSpace())
12315       FP = new AddrSpaceCastInst(FP, PointerType::get(FP->getType(), 0),
12316                                  FP->getName() + ".as0", CB);
12317 
12318     bool CBIsVoid = CB->getType()->isVoidTy();
12319     Instruction *IP = CB;
12320     FunctionType *CSFT = CB->getFunctionType();
12321     SmallVector<Value *> CSArgs(CB->arg_begin(), CB->arg_end());
12322 
12323     // If we know all callees and there are none, the call site is (effectively)
12324     // dead (or UB).
12325     if (AssumedCallees.empty()) {
12326       assert(AllCalleesKnown &&
12327              "Expected all callees to be known if there are none.");
12328       A.changeToUnreachableAfterManifest(CB);
12329       return ChangeStatus::CHANGED;
12330     }
12331 
12332     // Special handling for the single callee case.
12333     if (AllCalleesKnown && AssumedCallees.size() == 1) {
12334       auto *NewCallee = AssumedCallees.front();
12335       if (isLegalToPromote(*CB, NewCallee)) {
12336         promoteCall(*CB, NewCallee, nullptr);
12337         return ChangeStatus::CHANGED;
12338       }
12339       Instruction *NewCall = CallInst::Create(FunctionCallee(CSFT, NewCallee),
12340                                               CSArgs, CB->getName(), CB);
12341       if (!CBIsVoid)
12342         A.changeAfterManifest(IRPosition::callsite_returned(*CB), *NewCall);
12343       A.deleteAfterManifest(*CB);
12344       return ChangeStatus::CHANGED;
12345     }
12346 
12347     // For each potential value we create a conditional
12348     //
12349     // ```
12350     // if (ptr == value) value(args);
12351     // else ...
12352     // ```
12353     //
12354     bool SpecializedForAnyCallees = false;
12355     bool SpecializedForAllCallees = AllCalleesKnown;
12356     ICmpInst *LastCmp = nullptr;
12357     SmallVector<Function *, 8> SkippedAssumedCallees;
12358     SmallVector<std::pair<CallInst *, Instruction *>> NewCalls;
12359     for (Function *NewCallee : AssumedCallees) {
12360       if (!A.shouldSpecializeCallSiteForCallee(*this, *CB, *NewCallee)) {
12361         SkippedAssumedCallees.push_back(NewCallee);
12362         SpecializedForAllCallees = false;
12363         continue;
12364       }
12365       SpecializedForAnyCallees = true;
12366 
12367       LastCmp = new ICmpInst(IP, llvm::CmpInst::ICMP_EQ, FP, NewCallee);
12368       Instruction *ThenTI =
12369           SplitBlockAndInsertIfThen(LastCmp, IP, /* Unreachable */ false);
12370       BasicBlock *CBBB = CB->getParent();
12371       A.registerManifestAddedBasicBlock(*ThenTI->getParent());
12372       A.registerManifestAddedBasicBlock(*CBBB);
12373       auto *SplitTI = cast<BranchInst>(LastCmp->getNextNode());
12374       BasicBlock *ElseBB;
12375       if (IP == CB) {
12376         ElseBB = BasicBlock::Create(ThenTI->getContext(), "",
12377                                     ThenTI->getFunction(), CBBB);
12378         A.registerManifestAddedBasicBlock(*ElseBB);
12379         IP = BranchInst::Create(CBBB, ElseBB);
12380         SplitTI->replaceUsesOfWith(CBBB, ElseBB);
12381       } else {
12382         ElseBB = IP->getParent();
12383         ThenTI->replaceUsesOfWith(ElseBB, CBBB);
12384       }
12385       CastInst *RetBC = nullptr;
12386       CallInst *NewCall = nullptr;
12387       if (isLegalToPromote(*CB, NewCallee)) {
12388         auto *CBClone = cast<CallBase>(CB->clone());
12389         CBClone->insertBefore(ThenTI);
12390         NewCall = &cast<CallInst>(promoteCall(*CBClone, NewCallee, &RetBC));
12391       } else {
12392         NewCall = CallInst::Create(FunctionCallee(CSFT, NewCallee), CSArgs,
12393                                    CB->getName(), ThenTI);
12394       }
12395       NewCalls.push_back({NewCall, RetBC});
12396     }
12397 
12398     auto AttachCalleeMetadata = [&](CallBase &IndirectCB) {
12399       if (!AllCalleesKnown)
12400         return ChangeStatus::UNCHANGED;
12401       MDBuilder MDB(IndirectCB.getContext());
12402       MDNode *Callees = MDB.createCallees(SkippedAssumedCallees);
12403       IndirectCB.setMetadata(LLVMContext::MD_callees, Callees);
12404       return ChangeStatus::CHANGED;
12405     };
12406 
12407     if (!SpecializedForAnyCallees)
12408       return AttachCalleeMetadata(*CB);
12409 
12410     // Check if we need the fallback indirect call still.
12411     if (SpecializedForAllCallees) {
12412       LastCmp->replaceAllUsesWith(ConstantInt::getTrue(LastCmp->getContext()));
12413       LastCmp->eraseFromParent();
12414       new UnreachableInst(IP->getContext(), IP);
12415       IP->eraseFromParent();
12416     } else {
12417       auto *CBClone = cast<CallInst>(CB->clone());
12418       CBClone->setName(CB->getName());
12419       CBClone->insertBefore(IP);
12420       NewCalls.push_back({CBClone, nullptr});
12421       AttachCalleeMetadata(*CBClone);
12422     }
12423 
12424     // Check if we need a PHI to merge the results.
12425     if (!CBIsVoid) {
12426       auto *PHI = PHINode::Create(CB->getType(), NewCalls.size(),
12427                                   CB->getName() + ".phi",
12428                                   &*CB->getParent()->getFirstInsertionPt());
12429       for (auto &It : NewCalls) {
12430         CallBase *NewCall = It.first;
12431         Instruction *CallRet = It.second ? It.second : It.first;
12432         if (CallRet->getType() == CB->getType())
12433           PHI->addIncoming(CallRet, CallRet->getParent());
12434         else if (NewCall->getType()->isVoidTy())
12435           PHI->addIncoming(PoisonValue::get(CB->getType()),
12436                            NewCall->getParent());
12437         else
12438           llvm_unreachable("Call return should match or be void!");
12439       }
12440       A.changeAfterManifest(IRPosition::callsite_returned(*CB), *PHI);
12441     }
12442 
12443     A.deleteAfterManifest(*CB);
12444     Changed = ChangeStatus::CHANGED;
12445 
12446     return Changed;
12447   }
12448 
12449   /// See AbstractAttribute::getAsStr().
12450   const std::string getAsStr(Attributor *A) const override {
12451     return std::string(AllCalleesKnown ? "eliminate" : "specialize") +
12452            " indirect call site with " + std::to_string(AssumedCallees.size()) +
12453            " functions";
12454   }
12455 
12456   void trackStatistics() const override {
12457     if (AllCalleesKnown) {
12458       STATS_DECLTRACK(
12459           Eliminated, CallSites,
12460           "Number of indirect call sites eliminated via specialization")
12461     } else {
12462       STATS_DECLTRACK(Specialized, CallSites,
12463                       "Number of indirect call sites specialized")
12464     }
12465   }
12466 
12467   bool foreachCallee(function_ref<bool(Function *)> CB) const override {
12468     return isValidState() && AllCalleesKnown && all_of(AssumedCallees, CB);
12469   }
12470 
12471 private:
12472   /// Map to remember filter results.
12473   DenseMap<Function *, std::optional<bool>> FilterResults;
12474 
12475   /// If the !callee metadata was present, this set will contain all potential
12476   /// callees (superset).
12477   SmallSetVector<Function *, 4> PotentialCallees;
12478 
12479   /// This set contains all currently assumed calllees, which might grow over
12480   /// time.
12481   SmallSetVector<Function *, 4> AssumedCallees;
12482 
12483   /// Flag to indicate if all possible callees are in the AssumedCallees set or
12484   /// if there could be others.
12485   bool AllCalleesKnown = true;
12486 };
12487 } // namespace
12488 
12489 /// ------------------------ Address Space  ------------------------------------
12490 namespace {
12491 struct AAAddressSpaceImpl : public AAAddressSpace {
12492   AAAddressSpaceImpl(const IRPosition &IRP, Attributor &A)
12493       : AAAddressSpace(IRP, A) {}
12494 
12495   int32_t getAddressSpace() const override {
12496     assert(isValidState() && "the AA is invalid");
12497     return AssumedAddressSpace;
12498   }
12499 
12500   /// See AbstractAttribute::initialize(...).
12501   void initialize(Attributor &A) override {
12502     assert(getAssociatedType()->isPtrOrPtrVectorTy() &&
12503            "Associated value is not a pointer");
12504   }
12505 
12506   ChangeStatus updateImpl(Attributor &A) override {
12507     int32_t OldAddressSpace = AssumedAddressSpace;
12508     auto *AUO = A.getOrCreateAAFor<AAUnderlyingObjects>(getIRPosition(), this,
12509                                                         DepClassTy::REQUIRED);
12510     auto Pred = [&](Value &Obj) {
12511       if (isa<UndefValue>(&Obj))
12512         return true;
12513       return takeAddressSpace(Obj.getType()->getPointerAddressSpace());
12514     };
12515 
12516     if (!AUO->forallUnderlyingObjects(Pred))
12517       return indicatePessimisticFixpoint();
12518 
12519     return OldAddressSpace == AssumedAddressSpace ? ChangeStatus::UNCHANGED
12520                                                   : ChangeStatus::CHANGED;
12521   }
12522 
12523   /// See AbstractAttribute::manifest(...).
12524   ChangeStatus manifest(Attributor &A) override {
12525     Value *AssociatedValue = &getAssociatedValue();
12526     Value *OriginalValue = peelAddrspacecast(AssociatedValue);
12527     if (getAddressSpace() == NoAddressSpace ||
12528         static_cast<uint32_t>(getAddressSpace()) ==
12529             getAssociatedType()->getPointerAddressSpace())
12530       return ChangeStatus::UNCHANGED;
12531 
12532     Type *NewPtrTy = PointerType::get(getAssociatedType()->getContext(),
12533                                       static_cast<uint32_t>(getAddressSpace()));
12534     bool UseOriginalValue =
12535         OriginalValue->getType()->getPointerAddressSpace() ==
12536         static_cast<uint32_t>(getAddressSpace());
12537 
12538     bool Changed = false;
12539 
12540     auto MakeChange = [&](Instruction *I, Use &U) {
12541       Changed = true;
12542       if (UseOriginalValue) {
12543         A.changeUseAfterManifest(U, *OriginalValue);
12544         return;
12545       }
12546       Instruction *CastInst = new AddrSpaceCastInst(OriginalValue, NewPtrTy);
12547       CastInst->insertBefore(cast<Instruction>(I));
12548       A.changeUseAfterManifest(U, *CastInst);
12549     };
12550 
12551     auto Pred = [&](const Use &U, bool &) {
12552       if (U.get() != AssociatedValue)
12553         return true;
12554       auto *Inst = dyn_cast<Instruction>(U.getUser());
12555       if (!Inst)
12556         return true;
12557       // This is a WA to make sure we only change uses from the corresponding
12558       // CGSCC if the AA is run on CGSCC instead of the entire module.
12559       if (!A.isRunOn(Inst->getFunction()))
12560         return true;
12561       if (isa<LoadInst>(Inst))
12562         MakeChange(Inst, const_cast<Use &>(U));
12563       if (isa<StoreInst>(Inst)) {
12564         // We only make changes if the use is the pointer operand.
12565         if (U.getOperandNo() == 1)
12566           MakeChange(Inst, const_cast<Use &>(U));
12567       }
12568       return true;
12569     };
12570 
12571     // It doesn't matter if we can't check all uses as we can simply
12572     // conservatively ignore those that can not be visited.
12573     (void)A.checkForAllUses(Pred, *this, getAssociatedValue(),
12574                             /* CheckBBLivenessOnly */ true);
12575 
12576     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
12577   }
12578 
12579   /// See AbstractAttribute::getAsStr().
12580   const std::string getAsStr(Attributor *A) const override {
12581     if (!isValidState())
12582       return "addrspace(<invalid>)";
12583     return "addrspace(" +
12584            (AssumedAddressSpace == NoAddressSpace
12585                 ? "none"
12586                 : std::to_string(AssumedAddressSpace)) +
12587            ")";
12588   }
12589 
12590 private:
12591   int32_t AssumedAddressSpace = NoAddressSpace;
12592 
12593   bool takeAddressSpace(int32_t AS) {
12594     if (AssumedAddressSpace == NoAddressSpace) {
12595       AssumedAddressSpace = AS;
12596       return true;
12597     }
12598     return AssumedAddressSpace == AS;
12599   }
12600 
12601   static Value *peelAddrspacecast(Value *V) {
12602     if (auto *I = dyn_cast<AddrSpaceCastInst>(V))
12603       return peelAddrspacecast(I->getPointerOperand());
12604     if (auto *C = dyn_cast<ConstantExpr>(V))
12605       if (C->getOpcode() == Instruction::AddrSpaceCast)
12606         return peelAddrspacecast(C->getOperand(0));
12607     return V;
12608   }
12609 };
12610 
12611 struct AAAddressSpaceFloating final : AAAddressSpaceImpl {
12612   AAAddressSpaceFloating(const IRPosition &IRP, Attributor &A)
12613       : AAAddressSpaceImpl(IRP, A) {}
12614 
12615   void trackStatistics() const override {
12616     STATS_DECLTRACK_FLOATING_ATTR(addrspace);
12617   }
12618 };
12619 
12620 struct AAAddressSpaceReturned final : AAAddressSpaceImpl {
12621   AAAddressSpaceReturned(const IRPosition &IRP, Attributor &A)
12622       : AAAddressSpaceImpl(IRP, A) {}
12623 
12624   /// See AbstractAttribute::initialize(...).
12625   void initialize(Attributor &A) override {
12626     // TODO: we don't rewrite function argument for now because it will need to
12627     // rewrite the function signature and all call sites.
12628     (void)indicatePessimisticFixpoint();
12629   }
12630 
12631   void trackStatistics() const override {
12632     STATS_DECLTRACK_FNRET_ATTR(addrspace);
12633   }
12634 };
12635 
12636 struct AAAddressSpaceCallSiteReturned final : AAAddressSpaceImpl {
12637   AAAddressSpaceCallSiteReturned(const IRPosition &IRP, Attributor &A)
12638       : AAAddressSpaceImpl(IRP, A) {}
12639 
12640   void trackStatistics() const override {
12641     STATS_DECLTRACK_CSRET_ATTR(addrspace);
12642   }
12643 };
12644 
12645 struct AAAddressSpaceArgument final : AAAddressSpaceImpl {
12646   AAAddressSpaceArgument(const IRPosition &IRP, Attributor &A)
12647       : AAAddressSpaceImpl(IRP, A) {}
12648 
12649   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(addrspace); }
12650 };
12651 
12652 struct AAAddressSpaceCallSiteArgument final : AAAddressSpaceImpl {
12653   AAAddressSpaceCallSiteArgument(const IRPosition &IRP, Attributor &A)
12654       : AAAddressSpaceImpl(IRP, A) {}
12655 
12656   /// See AbstractAttribute::initialize(...).
12657   void initialize(Attributor &A) override {
12658     // TODO: we don't rewrite call site argument for now because it will need to
12659     // rewrite the function signature of the callee.
12660     (void)indicatePessimisticFixpoint();
12661   }
12662 
12663   void trackStatistics() const override {
12664     STATS_DECLTRACK_CSARG_ATTR(addrspace);
12665   }
12666 };
12667 } // namespace
12668 
12669 /// ----------- Allocation Info ----------
12670 namespace {
12671 struct AAAllocationInfoImpl : public AAAllocationInfo {
12672   AAAllocationInfoImpl(const IRPosition &IRP, Attributor &A)
12673       : AAAllocationInfo(IRP, A) {}
12674 
12675   std::optional<TypeSize> getAllocatedSize() const override {
12676     assert(isValidState() && "the AA is invalid");
12677     return AssumedAllocatedSize;
12678   }
12679 
12680   std::optional<TypeSize> findInitialAllocationSize(Instruction *I,
12681                                                     const DataLayout &DL) {
12682 
12683     // TODO: implement case for malloc like instructions
12684     switch (I->getOpcode()) {
12685     case Instruction::Alloca: {
12686       AllocaInst *AI = cast<AllocaInst>(I);
12687       return AI->getAllocationSize(DL);
12688     }
12689     default:
12690       return std::nullopt;
12691     }
12692   }
12693 
12694   ChangeStatus updateImpl(Attributor &A) override {
12695 
12696     const IRPosition &IRP = getIRPosition();
12697     Instruction *I = IRP.getCtxI();
12698 
12699     // TODO: update check for malloc like calls
12700     if (!isa<AllocaInst>(I))
12701       return indicatePessimisticFixpoint();
12702 
12703     bool IsKnownNoCapture;
12704     if (!AA::hasAssumedIRAttr<Attribute::NoCapture>(
12705             A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture))
12706       return indicatePessimisticFixpoint();
12707 
12708     const AAPointerInfo *PI =
12709         A.getOrCreateAAFor<AAPointerInfo>(IRP, *this, DepClassTy::REQUIRED);
12710 
12711     if (!PI)
12712       return indicatePessimisticFixpoint();
12713 
12714     if (!PI->getState().isValidState())
12715       return indicatePessimisticFixpoint();
12716 
12717     const DataLayout &DL = A.getDataLayout();
12718     const auto AllocationSize = findInitialAllocationSize(I, DL);
12719 
12720     // If allocation size is nullopt, we give up.
12721     if (!AllocationSize)
12722       return indicatePessimisticFixpoint();
12723 
12724     // For zero sized allocations, we give up.
12725     // Since we can't reduce further
12726     if (*AllocationSize == 0)
12727       return indicatePessimisticFixpoint();
12728 
12729     int64_t BinSize = PI->numOffsetBins();
12730 
12731     // TODO: implement for multiple bins
12732     if (BinSize > 1)
12733       return indicatePessimisticFixpoint();
12734 
12735     if (BinSize == 0) {
12736       auto NewAllocationSize = std::optional<TypeSize>(TypeSize(0, false));
12737       if (!changeAllocationSize(NewAllocationSize))
12738         return ChangeStatus::UNCHANGED;
12739       return ChangeStatus::CHANGED;
12740     }
12741 
12742     // TODO: refactor this to be part of multiple bin case
12743     const auto &It = PI->begin();
12744 
12745     // TODO: handle if Offset is not zero
12746     if (It->first.Offset != 0)
12747       return indicatePessimisticFixpoint();
12748 
12749     uint64_t SizeOfBin = It->first.Offset + It->first.Size;
12750 
12751     if (SizeOfBin >= *AllocationSize)
12752       return indicatePessimisticFixpoint();
12753 
12754     auto NewAllocationSize =
12755         std::optional<TypeSize>(TypeSize(SizeOfBin * 8, false));
12756 
12757     if (!changeAllocationSize(NewAllocationSize))
12758       return ChangeStatus::UNCHANGED;
12759 
12760     return ChangeStatus::CHANGED;
12761   }
12762 
12763   /// See AbstractAttribute::manifest(...).
12764   ChangeStatus manifest(Attributor &A) override {
12765 
12766     assert(isValidState() &&
12767            "Manifest should only be called if the state is valid.");
12768 
12769     Instruction *I = getIRPosition().getCtxI();
12770 
12771     auto FixedAllocatedSizeInBits = getAllocatedSize()->getFixedValue();
12772 
12773     unsigned long NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
12774 
12775     switch (I->getOpcode()) {
12776     // TODO: add case for malloc like calls
12777     case Instruction::Alloca: {
12778 
12779       AllocaInst *AI = cast<AllocaInst>(I);
12780 
12781       Type *CharType = Type::getInt8Ty(I->getContext());
12782 
12783       auto *NumBytesToValue =
12784           ConstantInt::get(I->getContext(), APInt(32, NumBytesToAllocate));
12785 
12786       AllocaInst *NewAllocaInst =
12787           new AllocaInst(CharType, AI->getAddressSpace(), NumBytesToValue,
12788                          AI->getAlign(), AI->getName(), AI->getNextNode());
12789 
12790       if (A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst))
12791         return ChangeStatus::CHANGED;
12792 
12793       break;
12794     }
12795     default:
12796       break;
12797     }
12798 
12799     return ChangeStatus::UNCHANGED;
12800   }
12801 
12802   /// See AbstractAttribute::getAsStr().
12803   const std::string getAsStr(Attributor *A) const override {
12804     if (!isValidState())
12805       return "allocationinfo(<invalid>)";
12806     return "allocationinfo(" +
12807            (AssumedAllocatedSize == HasNoAllocationSize
12808                 ? "none"
12809                 : std::to_string(AssumedAllocatedSize->getFixedValue())) +
12810            ")";
12811   }
12812 
12813 private:
12814   std::optional<TypeSize> AssumedAllocatedSize = HasNoAllocationSize;
12815 
12816   // Maintain the computed allocation size of the object.
12817   // Returns (bool) weather the size of the allocation was modified or not.
12818   bool changeAllocationSize(std::optional<TypeSize> Size) {
12819     if (AssumedAllocatedSize == HasNoAllocationSize ||
12820         AssumedAllocatedSize != Size) {
12821       AssumedAllocatedSize = Size;
12822       return true;
12823     }
12824     return false;
12825   }
12826 };
12827 
12828 struct AAAllocationInfoFloating : AAAllocationInfoImpl {
12829   AAAllocationInfoFloating(const IRPosition &IRP, Attributor &A)
12830       : AAAllocationInfoImpl(IRP, A) {}
12831 
12832   void trackStatistics() const override {
12833     STATS_DECLTRACK_FLOATING_ATTR(allocationinfo);
12834   }
12835 };
12836 
12837 struct AAAllocationInfoReturned : AAAllocationInfoImpl {
12838   AAAllocationInfoReturned(const IRPosition &IRP, Attributor &A)
12839       : AAAllocationInfoImpl(IRP, A) {}
12840 
12841   /// See AbstractAttribute::initialize(...).
12842   void initialize(Attributor &A) override {
12843     // TODO: we don't rewrite function argument for now because it will need to
12844     // rewrite the function signature and all call sites
12845     (void)indicatePessimisticFixpoint();
12846   }
12847 
12848   void trackStatistics() const override {
12849     STATS_DECLTRACK_FNRET_ATTR(allocationinfo);
12850   }
12851 };
12852 
12853 struct AAAllocationInfoCallSiteReturned : AAAllocationInfoImpl {
12854   AAAllocationInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
12855       : AAAllocationInfoImpl(IRP, A) {}
12856 
12857   void trackStatistics() const override {
12858     STATS_DECLTRACK_CSRET_ATTR(allocationinfo);
12859   }
12860 };
12861 
12862 struct AAAllocationInfoArgument : AAAllocationInfoImpl {
12863   AAAllocationInfoArgument(const IRPosition &IRP, Attributor &A)
12864       : AAAllocationInfoImpl(IRP, A) {}
12865 
12866   void trackStatistics() const override {
12867     STATS_DECLTRACK_ARG_ATTR(allocationinfo);
12868   }
12869 };
12870 
12871 struct AAAllocationInfoCallSiteArgument : AAAllocationInfoImpl {
12872   AAAllocationInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
12873       : AAAllocationInfoImpl(IRP, A) {}
12874 
12875   /// See AbstractAttribute::initialize(...).
12876   void initialize(Attributor &A) override {
12877 
12878     (void)indicatePessimisticFixpoint();
12879   }
12880 
12881   void trackStatistics() const override {
12882     STATS_DECLTRACK_CSARG_ATTR(allocationinfo);
12883   }
12884 };
12885 } // namespace
12886 
12887 const char AANoUnwind::ID = 0;
12888 const char AANoSync::ID = 0;
12889 const char AANoFree::ID = 0;
12890 const char AANonNull::ID = 0;
12891 const char AAMustProgress::ID = 0;
12892 const char AANoRecurse::ID = 0;
12893 const char AANonConvergent::ID = 0;
12894 const char AAWillReturn::ID = 0;
12895 const char AAUndefinedBehavior::ID = 0;
12896 const char AANoAlias::ID = 0;
12897 const char AAIntraFnReachability::ID = 0;
12898 const char AANoReturn::ID = 0;
12899 const char AAIsDead::ID = 0;
12900 const char AADereferenceable::ID = 0;
12901 const char AAAlign::ID = 0;
12902 const char AAInstanceInfo::ID = 0;
12903 const char AANoCapture::ID = 0;
12904 const char AAValueSimplify::ID = 0;
12905 const char AAHeapToStack::ID = 0;
12906 const char AAPrivatizablePtr::ID = 0;
12907 const char AAMemoryBehavior::ID = 0;
12908 const char AAMemoryLocation::ID = 0;
12909 const char AAValueConstantRange::ID = 0;
12910 const char AAPotentialConstantValues::ID = 0;
12911 const char AAPotentialValues::ID = 0;
12912 const char AANoUndef::ID = 0;
12913 const char AANoFPClass::ID = 0;
12914 const char AACallEdges::ID = 0;
12915 const char AAInterFnReachability::ID = 0;
12916 const char AAPointerInfo::ID = 0;
12917 const char AAAssumptionInfo::ID = 0;
12918 const char AAUnderlyingObjects::ID = 0;
12919 const char AAAddressSpace::ID = 0;
12920 const char AAAllocationInfo::ID = 0;
12921 const char AAIndirectCallInfo::ID = 0;
12922 const char AAGlobalValueInfo::ID = 0;
12923 const char AADenormalFPMath::ID = 0;
12924 
12925 // Macro magic to create the static generator function for attributes that
12926 // follow the naming scheme.
12927 
12928 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
12929   case IRPosition::PK:                                                         \
12930     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
12931 
12932 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
12933   case IRPosition::PK:                                                         \
12934     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
12935     ++NumAAs;                                                                  \
12936     break;
12937 
12938 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
12939   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12940     CLASS *AA = nullptr;                                                       \
12941     switch (IRP.getPositionKind()) {                                           \
12942       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
12943       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
12944       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
12945       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
12946       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
12947       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
12948       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
12949       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
12950     }                                                                          \
12951     return *AA;                                                                \
12952   }
12953 
12954 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
12955   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12956     CLASS *AA = nullptr;                                                       \
12957     switch (IRP.getPositionKind()) {                                           \
12958       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
12959       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
12960       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
12961       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
12962       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
12963       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
12964       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
12965       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
12966     }                                                                          \
12967     return *AA;                                                                \
12968   }
12969 
12970 #define CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(POS, SUFFIX, CLASS)         \
12971   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12972     CLASS *AA = nullptr;                                                       \
12973     switch (IRP.getPositionKind()) {                                           \
12974       SWITCH_PK_CREATE(CLASS, IRP, POS, SUFFIX)                                \
12975     default:                                                                   \
12976       llvm_unreachable("Cannot create " #CLASS " for position otherthan " #POS \
12977                        " position!");                                          \
12978     }                                                                          \
12979     return *AA;                                                                \
12980   }
12981 
12982 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
12983   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12984     CLASS *AA = nullptr;                                                       \
12985     switch (IRP.getPositionKind()) {                                           \
12986       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
12987       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
12988       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
12989       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
12990       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
12991       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
12992       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
12993       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
12994     }                                                                          \
12995     return *AA;                                                                \
12996   }
12997 
12998 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
12999   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
13000     CLASS *AA = nullptr;                                                       \
13001     switch (IRP.getPositionKind()) {                                           \
13002       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
13003       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
13004       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
13005       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
13006       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
13007       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
13008       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
13009       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
13010     }                                                                          \
13011     return *AA;                                                                \
13012   }
13013 
13014 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
13015   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
13016     CLASS *AA = nullptr;                                                       \
13017     switch (IRP.getPositionKind()) {                                           \
13018       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
13019       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
13020       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
13021       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
13022       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
13023       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
13024       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
13025       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
13026     }                                                                          \
13027     return *AA;                                                                \
13028   }
13029 
13030 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
13031 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
13032 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
13033 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
13034 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
13035 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
13036 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
13037 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
13038 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMustProgress)
13039 
13040 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
13041 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
13042 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
13043 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
13044 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
13045 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
13046 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
13047 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
13048 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
13049 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
13050 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
13051 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFPClass)
13052 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
13053 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAddressSpace)
13054 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAllocationInfo)
13055 
13056 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
13057 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
13058 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
13059 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUnderlyingObjects)
13060 
13061 CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(IRP_CALL_SITE, CallSite,
13062                                            AAIndirectCallInfo)
13063 CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(IRP_FLOAT, Floating,
13064                                            AAGlobalValueInfo)
13065 
13066 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
13067 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
13068 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonConvergent)
13069 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIntraFnReachability)
13070 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInterFnReachability)
13071 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADenormalFPMath)
13072 
13073 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
13074 
13075 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
13076 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
13077 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
13078 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
13079 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
13080 #undef CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION
13081 #undef SWITCH_PK_CREATE
13082 #undef SWITCH_PK_INV
13083