1 //===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // OpenMP specific optimizations:
10 //
11 // - Deduplication of runtime calls, e.g., omp_get_thread_num.
12 // - Replacing globalized device memory with stack memory.
13 // - Replacing globalized device memory with shared memory.
14 // - Parallel region merging.
15 // - Transforming generic-mode device kernels to SPMD mode.
16 // - Specializing the state machine for generic-mode device kernels.
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "llvm/Transforms/IPO/OpenMPOpt.h"
21 
22 #include "llvm/ADT/EnumeratedArray.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/SetVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/CallGraph.h"
28 #include "llvm/Analysis/CallGraphSCCPass.h"
29 #include "llvm/Analysis/MemoryLocation.h"
30 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/Frontend/OpenMP/OMPConstants.h"
33 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
34 #include "llvm/IR/Assumptions.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DiagnosticInfo.h"
37 #include "llvm/IR/GlobalValue.h"
38 #include "llvm/IR/GlobalVariable.h"
39 #include "llvm/IR/Instruction.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/IntrinsicsAMDGPU.h"
43 #include "llvm/IR/IntrinsicsNVPTX.h"
44 #include "llvm/IR/LLVMContext.h"
45 #include "llvm/InitializePasses.h"
46 #include "llvm/Support/CommandLine.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Transforms/IPO.h"
49 #include "llvm/Transforms/IPO/Attributor.h"
50 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
51 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
52 #include "llvm/Transforms/Utils/CodeExtractor.h"
53 
54 #include <algorithm>
55 
56 using namespace llvm;
57 using namespace omp;
58 
59 #define DEBUG_TYPE "openmp-opt"
60 
61 static cl::opt<bool> DisableOpenMPOptimizations(
62     "openmp-opt-disable", cl::ZeroOrMore,
63     cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
64     cl::init(false));
65 
66 static cl::opt<bool> EnableParallelRegionMerging(
67     "openmp-opt-enable-merging", cl::ZeroOrMore,
68     cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden,
69     cl::init(false));
70 
71 static cl::opt<bool>
72     DisableInternalization("openmp-opt-disable-internalization", cl::ZeroOrMore,
73                            cl::desc("Disable function internalization."),
74                            cl::Hidden, cl::init(false));
75 
76 static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
77                                     cl::Hidden);
78 static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
79                                         cl::init(false), cl::Hidden);
80 
81 static cl::opt<bool> HideMemoryTransferLatency(
82     "openmp-hide-memory-transfer-latency",
83     cl::desc("[WIP] Tries to hide the latency of host to device memory"
84              " transfers"),
85     cl::Hidden, cl::init(false));
86 
87 static cl::opt<bool> DisableOpenMPOptDeglobalization(
88     "openmp-opt-disable-deglobalization", cl::ZeroOrMore,
89     cl::desc("Disable OpenMP optimizations involving deglobalization."),
90     cl::Hidden, cl::init(false));
91 
92 static cl::opt<bool> DisableOpenMPOptSPMDization(
93     "openmp-opt-disable-spmdization", cl::ZeroOrMore,
94     cl::desc("Disable OpenMP optimizations involving SPMD-ization."),
95     cl::Hidden, cl::init(false));
96 
97 static cl::opt<bool> DisableOpenMPOptFolding(
98     "openmp-opt-disable-folding", cl::ZeroOrMore,
99     cl::desc("Disable OpenMP optimizations involving folding."), cl::Hidden,
100     cl::init(false));
101 
102 static cl::opt<bool> DisableOpenMPOptStateMachineRewrite(
103     "openmp-opt-disable-state-machine-rewrite", cl::ZeroOrMore,
104     cl::desc("Disable OpenMP optimizations that replace the state machine."),
105     cl::Hidden, cl::init(false));
106 
107 static cl::opt<bool> DisableOpenMPOptBarrierElimination(
108     "openmp-opt-disable-barrier-elimination", cl::ZeroOrMore,
109     cl::desc("Disable OpenMP optimizations that eliminate barriers."),
110     cl::Hidden, cl::init(false));
111 
112 static cl::opt<bool> PrintModuleAfterOptimizations(
113     "openmp-opt-print-module", cl::ZeroOrMore,
114     cl::desc("Print the current module after OpenMP optimizations."),
115     cl::Hidden, cl::init(false));
116 
117 static cl::opt<bool> AlwaysInlineDeviceFunctions(
118     "openmp-opt-inline-device", cl::ZeroOrMore,
119     cl::desc("Inline all applicible functions on the device."), cl::Hidden,
120     cl::init(false));
121 
122 static cl::opt<bool>
123     EnableVerboseRemarks("openmp-opt-verbose-remarks", cl::ZeroOrMore,
124                          cl::desc("Enables more verbose remarks."), cl::Hidden,
125                          cl::init(false));
126 
127 static cl::opt<unsigned>
128     SetFixpointIterations("openmp-opt-max-iterations", cl::Hidden,
129                           cl::desc("Maximal number of attributor iterations."),
130                           cl::init(256));
131 
132 STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
133           "Number of OpenMP runtime calls deduplicated");
134 STATISTIC(NumOpenMPParallelRegionsDeleted,
135           "Number of OpenMP parallel regions deleted");
136 STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
137           "Number of OpenMP runtime functions identified");
138 STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
139           "Number of OpenMP runtime function uses identified");
140 STATISTIC(NumOpenMPTargetRegionKernels,
141           "Number of OpenMP target region entry points (=kernels) identified");
142 STATISTIC(NumOpenMPTargetRegionKernelsSPMD,
143           "Number of OpenMP target region entry points (=kernels) executed in "
144           "SPMD-mode instead of generic-mode");
145 STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine,
146           "Number of OpenMP target region entry points (=kernels) executed in "
147           "generic-mode without a state machines");
148 STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback,
149           "Number of OpenMP target region entry points (=kernels) executed in "
150           "generic-mode with customized state machines with fallback");
151 STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback,
152           "Number of OpenMP target region entry points (=kernels) executed in "
153           "generic-mode with customized state machines without fallback");
154 STATISTIC(
155     NumOpenMPParallelRegionsReplacedInGPUStateMachine,
156     "Number of OpenMP parallel regions replaced with ID in GPU state machines");
157 STATISTIC(NumOpenMPParallelRegionsMerged,
158           "Number of OpenMP parallel regions merged");
159 STATISTIC(NumBytesMovedToSharedMemory,
160           "Amount of memory pushed to shared memory");
161 STATISTIC(NumBarriersEliminated, "Number of redundant barriers eliminated");
162 
163 #if !defined(NDEBUG)
164 static constexpr auto TAG = "[" DEBUG_TYPE "]";
165 #endif
166 
167 namespace {
168 
169 struct AAHeapToShared;
170 
171 struct AAICVTracker;
172 
173 /// OpenMP specific information. For now, stores RFIs and ICVs also needed for
174 /// Attributor runs.
175 struct OMPInformationCache : public InformationCache {
176   OMPInformationCache(Module &M, AnalysisGetter &AG,
177                       BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
178                       KernelSet &Kernels)
179       : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
180         Kernels(Kernels) {
181 
182     OMPBuilder.initialize();
183     initializeRuntimeFunctions();
184     initializeInternalControlVars();
185   }
186 
187   /// Generic information that describes an internal control variable.
188   struct InternalControlVarInfo {
189     /// The kind, as described by InternalControlVar enum.
190     InternalControlVar Kind;
191 
192     /// The name of the ICV.
193     StringRef Name;
194 
195     /// Environment variable associated with this ICV.
196     StringRef EnvVarName;
197 
198     /// Initial value kind.
199     ICVInitValue InitKind;
200 
201     /// Initial value.
202     ConstantInt *InitValue;
203 
204     /// Setter RTL function associated with this ICV.
205     RuntimeFunction Setter;
206 
207     /// Getter RTL function associated with this ICV.
208     RuntimeFunction Getter;
209 
210     /// RTL Function corresponding to the override clause of this ICV
211     RuntimeFunction Clause;
212   };
213 
214   /// Generic information that describes a runtime function
215   struct RuntimeFunctionInfo {
216 
217     /// The kind, as described by the RuntimeFunction enum.
218     RuntimeFunction Kind;
219 
220     /// The name of the function.
221     StringRef Name;
222 
223     /// Flag to indicate a variadic function.
224     bool IsVarArg;
225 
226     /// The return type of the function.
227     Type *ReturnType;
228 
229     /// The argument types of the function.
230     SmallVector<Type *, 8> ArgumentTypes;
231 
232     /// The declaration if available.
233     Function *Declaration = nullptr;
234 
235     /// Uses of this runtime function per function containing the use.
236     using UseVector = SmallVector<Use *, 16>;
237 
238     /// Clear UsesMap for runtime function.
239     void clearUsesMap() { UsesMap.clear(); }
240 
241     /// Boolean conversion that is true if the runtime function was found.
242     operator bool() const { return Declaration; }
243 
244     /// Return the vector of uses in function \p F.
245     UseVector &getOrCreateUseVector(Function *F) {
246       std::shared_ptr<UseVector> &UV = UsesMap[F];
247       if (!UV)
248         UV = std::make_shared<UseVector>();
249       return *UV;
250     }
251 
252     /// Return the vector of uses in function \p F or `nullptr` if there are
253     /// none.
254     const UseVector *getUseVector(Function &F) const {
255       auto I = UsesMap.find(&F);
256       if (I != UsesMap.end())
257         return I->second.get();
258       return nullptr;
259     }
260 
261     /// Return how many functions contain uses of this runtime function.
262     size_t getNumFunctionsWithUses() const { return UsesMap.size(); }
263 
264     /// Return the number of arguments (or the minimal number for variadic
265     /// functions).
266     size_t getNumArgs() const { return ArgumentTypes.size(); }
267 
268     /// Run the callback \p CB on each use and forget the use if the result is
269     /// true. The callback will be fed the function in which the use was
270     /// encountered as second argument.
271     void foreachUse(SmallVectorImpl<Function *> &SCC,
272                     function_ref<bool(Use &, Function &)> CB) {
273       for (Function *F : SCC)
274         foreachUse(CB, F);
275     }
276 
277     /// Run the callback \p CB on each use within the function \p F and forget
278     /// the use if the result is true.
279     void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
280       SmallVector<unsigned, 8> ToBeDeleted;
281       ToBeDeleted.clear();
282 
283       unsigned Idx = 0;
284       UseVector &UV = getOrCreateUseVector(F);
285 
286       for (Use *U : UV) {
287         if (CB(*U, *F))
288           ToBeDeleted.push_back(Idx);
289         ++Idx;
290       }
291 
292       // Remove the to-be-deleted indices in reverse order as prior
293       // modifications will not modify the smaller indices.
294       while (!ToBeDeleted.empty()) {
295         unsigned Idx = ToBeDeleted.pop_back_val();
296         UV[Idx] = UV.back();
297         UV.pop_back();
298       }
299     }
300 
301   private:
302     /// Map from functions to all uses of this runtime function contained in
303     /// them.
304     DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;
305 
306   public:
307     /// Iterators for the uses of this runtime function.
308     decltype(UsesMap)::iterator begin() { return UsesMap.begin(); }
309     decltype(UsesMap)::iterator end() { return UsesMap.end(); }
310   };
311 
312   /// An OpenMP-IR-Builder instance
313   OpenMPIRBuilder OMPBuilder;
314 
315   /// Map from runtime function kind to the runtime function description.
316   EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
317                   RuntimeFunction::OMPRTL___last>
318       RFIs;
319 
320   /// Map from function declarations/definitions to their runtime enum type.
321   DenseMap<Function *, RuntimeFunction> RuntimeFunctionIDMap;
322 
323   /// Map from ICV kind to the ICV description.
324   EnumeratedArray<InternalControlVarInfo, InternalControlVar,
325                   InternalControlVar::ICV___last>
326       ICVs;
327 
328   /// Helper to initialize all internal control variable information for those
329   /// defined in OMPKinds.def.
330   void initializeInternalControlVars() {
331 #define ICV_RT_SET(_Name, RTL)                                                 \
332   {                                                                            \
333     auto &ICV = ICVs[_Name];                                                   \
334     ICV.Setter = RTL;                                                          \
335   }
336 #define ICV_RT_GET(Name, RTL)                                                  \
337   {                                                                            \
338     auto &ICV = ICVs[Name];                                                    \
339     ICV.Getter = RTL;                                                          \
340   }
341 #define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
342   {                                                                            \
343     auto &ICV = ICVs[Enum];                                                    \
344     ICV.Name = _Name;                                                          \
345     ICV.Kind = Enum;                                                           \
346     ICV.InitKind = Init;                                                       \
347     ICV.EnvVarName = _EnvVarName;                                              \
348     switch (ICV.InitKind) {                                                    \
349     case ICV_IMPLEMENTATION_DEFINED:                                           \
350       ICV.InitValue = nullptr;                                                 \
351       break;                                                                   \
352     case ICV_ZERO:                                                             \
353       ICV.InitValue = ConstantInt::get(                                        \
354           Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
355       break;                                                                   \
356     case ICV_FALSE:                                                            \
357       ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
358       break;                                                                   \
359     case ICV_LAST:                                                             \
360       break;                                                                   \
361     }                                                                          \
362   }
363 #include "llvm/Frontend/OpenMP/OMPKinds.def"
364   }
365 
366   /// Returns true if the function declaration \p F matches the runtime
367   /// function types, that is, return type \p RTFRetType, and argument types
368   /// \p RTFArgTypes.
369   static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
370                                   SmallVector<Type *, 8> &RTFArgTypes) {
371     // TODO: We should output information to the user (under debug output
372     //       and via remarks).
373 
374     if (!F)
375       return false;
376     if (F->getReturnType() != RTFRetType)
377       return false;
378     if (F->arg_size() != RTFArgTypes.size())
379       return false;
380 
381     auto *RTFTyIt = RTFArgTypes.begin();
382     for (Argument &Arg : F->args()) {
383       if (Arg.getType() != *RTFTyIt)
384         return false;
385 
386       ++RTFTyIt;
387     }
388 
389     return true;
390   }
391 
392   // Helper to collect all uses of the declaration in the UsesMap.
393   unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
394     unsigned NumUses = 0;
395     if (!RFI.Declaration)
396       return NumUses;
397     OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);
398 
399     if (CollectStats) {
400       NumOpenMPRuntimeFunctionsIdentified += 1;
401       NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
402     }
403 
404     // TODO: We directly convert uses into proper calls and unknown uses.
405     for (Use &U : RFI.Declaration->uses()) {
406       if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
407         if (ModuleSlice.count(UserI->getFunction())) {
408           RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
409           ++NumUses;
410         }
411       } else {
412         RFI.getOrCreateUseVector(nullptr).push_back(&U);
413         ++NumUses;
414       }
415     }
416     return NumUses;
417   }
418 
419   // Helper function to recollect uses of a runtime function.
420   void recollectUsesForFunction(RuntimeFunction RTF) {
421     auto &RFI = RFIs[RTF];
422     RFI.clearUsesMap();
423     collectUses(RFI, /*CollectStats*/ false);
424   }
425 
426   // Helper function to recollect uses of all runtime functions.
427   void recollectUses() {
428     for (int Idx = 0; Idx < RFIs.size(); ++Idx)
429       recollectUsesForFunction(static_cast<RuntimeFunction>(Idx));
430   }
431 
432   // Helper function to inherit the calling convention of the function callee.
433   void setCallingConvention(FunctionCallee Callee, CallInst *CI) {
434     if (Function *Fn = dyn_cast<Function>(Callee.getCallee()))
435       CI->setCallingConv(Fn->getCallingConv());
436   }
437 
438   /// Helper to initialize all runtime function information for those defined
439   /// in OpenMPKinds.def.
440   void initializeRuntimeFunctions() {
441     Module &M = *((*ModuleSlice.begin())->getParent());
442 
443     // Helper macros for handling __VA_ARGS__ in OMP_RTL
444 #define OMP_TYPE(VarName, ...)                                                 \
445   Type *VarName = OMPBuilder.VarName;                                          \
446   (void)VarName;
447 
448 #define OMP_ARRAY_TYPE(VarName, ...)                                           \
449   ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
450   (void)VarName##Ty;                                                           \
451   PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
452   (void)VarName##PtrTy;
453 
454 #define OMP_FUNCTION_TYPE(VarName, ...)                                        \
455   FunctionType *VarName = OMPBuilder.VarName;                                  \
456   (void)VarName;                                                               \
457   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
458   (void)VarName##Ptr;
459 
460 #define OMP_STRUCT_TYPE(VarName, ...)                                          \
461   StructType *VarName = OMPBuilder.VarName;                                    \
462   (void)VarName;                                                               \
463   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
464   (void)VarName##Ptr;
465 
466 #define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
467   {                                                                            \
468     SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
469     Function *F = M.getFunction(_Name);                                        \
470     RTLFunctions.insert(F);                                                    \
471     if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
472       RuntimeFunctionIDMap[F] = _Enum;                                         \
473       auto &RFI = RFIs[_Enum];                                                 \
474       RFI.Kind = _Enum;                                                        \
475       RFI.Name = _Name;                                                        \
476       RFI.IsVarArg = _IsVarArg;                                                \
477       RFI.ReturnType = OMPBuilder._ReturnType;                                 \
478       RFI.ArgumentTypes = std::move(ArgsTypes);                                \
479       RFI.Declaration = F;                                                     \
480       unsigned NumUses = collectUses(RFI);                                     \
481       (void)NumUses;                                                           \
482       LLVM_DEBUG({                                                             \
483         dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
484                << " found\n";                                                  \
485         if (RFI.Declaration)                                                   \
486           dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
487                  << RFI.getNumFunctionsWithUses()                              \
488                  << " different functions.\n";                                 \
489       });                                                                      \
490     }                                                                          \
491   }
492 #include "llvm/Frontend/OpenMP/OMPKinds.def"
493 
494     // Remove the `noinline` attribute from `__kmpc`, `_OMP::` and `omp_`
495     // functions, except if `optnone` is present.
496     for (Function &F : M) {
497       for (StringRef Prefix : {"__kmpc", "_ZN4_OMP", "omp_"})
498         if (F.getName().startswith(Prefix) &&
499             !F.hasFnAttribute(Attribute::OptimizeNone))
500           F.removeFnAttr(Attribute::NoInline);
501     }
502 
503     // TODO: We should attach the attributes defined in OMPKinds.def.
504   }
505 
506   /// Collection of known kernels (\see Kernel) in the module.
507   KernelSet &Kernels;
508 
509   /// Collection of known OpenMP runtime functions..
510   DenseSet<const Function *> RTLFunctions;
511 };
512 
513 template <typename Ty, bool InsertInvalidates = true>
514 struct BooleanStateWithSetVector : public BooleanState {
515   bool contains(const Ty &Elem) const { return Set.contains(Elem); }
516   bool insert(const Ty &Elem) {
517     if (InsertInvalidates)
518       BooleanState::indicatePessimisticFixpoint();
519     return Set.insert(Elem);
520   }
521 
522   const Ty &operator[](int Idx) const { return Set[Idx]; }
523   bool operator==(const BooleanStateWithSetVector &RHS) const {
524     return BooleanState::operator==(RHS) && Set == RHS.Set;
525   }
526   bool operator!=(const BooleanStateWithSetVector &RHS) const {
527     return !(*this == RHS);
528   }
529 
530   bool empty() const { return Set.empty(); }
531   size_t size() const { return Set.size(); }
532 
533   /// "Clamp" this state with \p RHS.
534   BooleanStateWithSetVector &operator^=(const BooleanStateWithSetVector &RHS) {
535     BooleanState::operator^=(RHS);
536     Set.insert(RHS.Set.begin(), RHS.Set.end());
537     return *this;
538   }
539 
540 private:
541   /// A set to keep track of elements.
542   SetVector<Ty> Set;
543 
544 public:
545   typename decltype(Set)::iterator begin() { return Set.begin(); }
546   typename decltype(Set)::iterator end() { return Set.end(); }
547   typename decltype(Set)::const_iterator begin() const { return Set.begin(); }
548   typename decltype(Set)::const_iterator end() const { return Set.end(); }
549 };
550 
551 template <typename Ty, bool InsertInvalidates = true>
552 using BooleanStateWithPtrSetVector =
553     BooleanStateWithSetVector<Ty *, InsertInvalidates>;
554 
555 struct KernelInfoState : AbstractState {
556   /// Flag to track if we reached a fixpoint.
557   bool IsAtFixpoint = false;
558 
559   /// The parallel regions (identified by the outlined parallel functions) that
560   /// can be reached from the associated function.
561   BooleanStateWithPtrSetVector<Function, /* InsertInvalidates */ false>
562       ReachedKnownParallelRegions;
563 
564   /// State to track what parallel region we might reach.
565   BooleanStateWithPtrSetVector<CallBase> ReachedUnknownParallelRegions;
566 
567   /// State to track if we are in SPMD-mode, assumed or know, and why we decided
568   /// we cannot be. If it is assumed, then RequiresFullRuntime should also be
569   /// false.
570   BooleanStateWithPtrSetVector<Instruction, false> SPMDCompatibilityTracker;
571 
572   /// The __kmpc_target_init call in this kernel, if any. If we find more than
573   /// one we abort as the kernel is malformed.
574   CallBase *KernelInitCB = nullptr;
575 
576   /// The __kmpc_target_deinit call in this kernel, if any. If we find more than
577   /// one we abort as the kernel is malformed.
578   CallBase *KernelDeinitCB = nullptr;
579 
580   /// Flag to indicate if the associated function is a kernel entry.
581   bool IsKernelEntry = false;
582 
583   /// State to track what kernel entries can reach the associated function.
584   BooleanStateWithPtrSetVector<Function, false> ReachingKernelEntries;
585 
586   /// State to indicate if we can track parallel level of the associated
587   /// function. We will give up tracking if we encounter unknown caller or the
588   /// caller is __kmpc_parallel_51.
589   BooleanStateWithSetVector<uint8_t> ParallelLevels;
590 
591   /// Abstract State interface
592   ///{
593 
594   KernelInfoState() {}
595   KernelInfoState(bool BestState) {
596     if (!BestState)
597       indicatePessimisticFixpoint();
598   }
599 
600   /// See AbstractState::isValidState(...)
601   bool isValidState() const override { return true; }
602 
603   /// See AbstractState::isAtFixpoint(...)
604   bool isAtFixpoint() const override { return IsAtFixpoint; }
605 
606   /// See AbstractState::indicatePessimisticFixpoint(...)
607   ChangeStatus indicatePessimisticFixpoint() override {
608     IsAtFixpoint = true;
609     ReachingKernelEntries.indicatePessimisticFixpoint();
610     SPMDCompatibilityTracker.indicatePessimisticFixpoint();
611     ReachedKnownParallelRegions.indicatePessimisticFixpoint();
612     ReachedUnknownParallelRegions.indicatePessimisticFixpoint();
613     return ChangeStatus::CHANGED;
614   }
615 
616   /// See AbstractState::indicateOptimisticFixpoint(...)
617   ChangeStatus indicateOptimisticFixpoint() override {
618     IsAtFixpoint = true;
619     ReachingKernelEntries.indicateOptimisticFixpoint();
620     SPMDCompatibilityTracker.indicateOptimisticFixpoint();
621     ReachedKnownParallelRegions.indicateOptimisticFixpoint();
622     ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
623     return ChangeStatus::UNCHANGED;
624   }
625 
626   /// Return the assumed state
627   KernelInfoState &getAssumed() { return *this; }
628   const KernelInfoState &getAssumed() const { return *this; }
629 
630   bool operator==(const KernelInfoState &RHS) const {
631     if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker)
632       return false;
633     if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions)
634       return false;
635     if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions)
636       return false;
637     if (ReachingKernelEntries != RHS.ReachingKernelEntries)
638       return false;
639     return true;
640   }
641 
642   /// Returns true if this kernel contains any OpenMP parallel regions.
643   bool mayContainParallelRegion() {
644     return !ReachedKnownParallelRegions.empty() ||
645            !ReachedUnknownParallelRegions.empty();
646   }
647 
648   /// Return empty set as the best state of potential values.
649   static KernelInfoState getBestState() { return KernelInfoState(true); }
650 
651   static KernelInfoState getBestState(KernelInfoState &KIS) {
652     return getBestState();
653   }
654 
655   /// Return full set as the worst state of potential values.
656   static KernelInfoState getWorstState() { return KernelInfoState(false); }
657 
658   /// "Clamp" this state with \p KIS.
659   KernelInfoState operator^=(const KernelInfoState &KIS) {
660     // Do not merge two different _init and _deinit call sites.
661     if (KIS.KernelInitCB) {
662       if (KernelInitCB && KernelInitCB != KIS.KernelInitCB)
663         llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
664                          "assumptions.");
665       KernelInitCB = KIS.KernelInitCB;
666     }
667     if (KIS.KernelDeinitCB) {
668       if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB)
669         llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
670                          "assumptions.");
671       KernelDeinitCB = KIS.KernelDeinitCB;
672     }
673     SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker;
674     ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions;
675     ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions;
676     return *this;
677   }
678 
679   KernelInfoState operator&=(const KernelInfoState &KIS) {
680     return (*this ^= KIS);
681   }
682 
683   ///}
684 };
685 
686 /// Used to map the values physically (in the IR) stored in an offload
687 /// array, to a vector in memory.
688 struct OffloadArray {
689   /// Physical array (in the IR).
690   AllocaInst *Array = nullptr;
691   /// Mapped values.
692   SmallVector<Value *, 8> StoredValues;
693   /// Last stores made in the offload array.
694   SmallVector<StoreInst *, 8> LastAccesses;
695 
696   OffloadArray() = default;
697 
698   /// Initializes the OffloadArray with the values stored in \p Array before
699   /// instruction \p Before is reached. Returns false if the initialization
700   /// fails.
701   /// This MUST be used immediately after the construction of the object.
702   bool initialize(AllocaInst &Array, Instruction &Before) {
703     if (!Array.getAllocatedType()->isArrayTy())
704       return false;
705 
706     if (!getValues(Array, Before))
707       return false;
708 
709     this->Array = &Array;
710     return true;
711   }
712 
713   static const unsigned DeviceIDArgNum = 1;
714   static const unsigned BasePtrsArgNum = 3;
715   static const unsigned PtrsArgNum = 4;
716   static const unsigned SizesArgNum = 5;
717 
718 private:
719   /// Traverses the BasicBlock where \p Array is, collecting the stores made to
720   /// \p Array, leaving StoredValues with the values stored before the
721   /// instruction \p Before is reached.
722   bool getValues(AllocaInst &Array, Instruction &Before) {
723     // Initialize container.
724     const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements();
725     StoredValues.assign(NumValues, nullptr);
726     LastAccesses.assign(NumValues, nullptr);
727 
728     // TODO: This assumes the instruction \p Before is in the same
729     //  BasicBlock as Array. Make it general, for any control flow graph.
730     BasicBlock *BB = Array.getParent();
731     if (BB != Before.getParent())
732       return false;
733 
734     const DataLayout &DL = Array.getModule()->getDataLayout();
735     const unsigned int PointerSize = DL.getPointerSize();
736 
737     for (Instruction &I : *BB) {
738       if (&I == &Before)
739         break;
740 
741       if (!isa<StoreInst>(&I))
742         continue;
743 
744       auto *S = cast<StoreInst>(&I);
745       int64_t Offset = -1;
746       auto *Dst =
747           GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL);
748       if (Dst == &Array) {
749         int64_t Idx = Offset / PointerSize;
750         StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
751         LastAccesses[Idx] = S;
752       }
753     }
754 
755     return isFilled();
756   }
757 
758   /// Returns true if all values in StoredValues and
759   /// LastAccesses are not nullptrs.
760   bool isFilled() {
761     const unsigned NumValues = StoredValues.size();
762     for (unsigned I = 0; I < NumValues; ++I) {
763       if (!StoredValues[I] || !LastAccesses[I])
764         return false;
765     }
766 
767     return true;
768   }
769 };
770 
771 struct OpenMPOpt {
772 
773   using OptimizationRemarkGetter =
774       function_ref<OptimizationRemarkEmitter &(Function *)>;
775 
776   OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
777             OptimizationRemarkGetter OREGetter,
778             OMPInformationCache &OMPInfoCache, Attributor &A)
779       : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
780         OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}
781 
782   /// Check if any remarks are enabled for openmp-opt
783   bool remarksEnabled() {
784     auto &Ctx = M.getContext();
785     return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
786   }
787 
788   /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
789   bool run(bool IsModulePass) {
790     if (SCC.empty())
791       return false;
792 
793     bool Changed = false;
794 
795     LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
796                       << " functions in a slice with "
797                       << OMPInfoCache.ModuleSlice.size() << " functions\n");
798 
799     if (IsModulePass) {
800       Changed |= runAttributor(IsModulePass);
801 
802       // Recollect uses, in case Attributor deleted any.
803       OMPInfoCache.recollectUses();
804 
805       // TODO: This should be folded into buildCustomStateMachine.
806       Changed |= rewriteDeviceCodeStateMachine();
807 
808       if (remarksEnabled())
809         analysisGlobalization();
810 
811       Changed |= eliminateBarriers();
812     } else {
813       if (PrintICVValues)
814         printICVs();
815       if (PrintOpenMPKernels)
816         printKernels();
817 
818       Changed |= runAttributor(IsModulePass);
819 
820       // Recollect uses, in case Attributor deleted any.
821       OMPInfoCache.recollectUses();
822 
823       Changed |= deleteParallelRegions();
824 
825       if (HideMemoryTransferLatency)
826         Changed |= hideMemTransfersLatency();
827       Changed |= deduplicateRuntimeCalls();
828       if (EnableParallelRegionMerging) {
829         if (mergeParallelRegions()) {
830           deduplicateRuntimeCalls();
831           Changed = true;
832         }
833       }
834 
835       Changed |= eliminateBarriers();
836     }
837 
838     return Changed;
839   }
840 
841   /// Print initial ICV values for testing.
842   /// FIXME: This should be done from the Attributor once it is added.
843   void printICVs() const {
844     InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
845                                  ICV_proc_bind};
846 
847     for (Function *F : OMPInfoCache.ModuleSlice) {
848       for (auto ICV : ICVs) {
849         auto ICVInfo = OMPInfoCache.ICVs[ICV];
850         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
851           return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
852                      << " Value: "
853                      << (ICVInfo.InitValue
854                              ? toString(ICVInfo.InitValue->getValue(), 10, true)
855                              : "IMPLEMENTATION_DEFINED");
856         };
857 
858         emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPICVTracker", Remark);
859       }
860     }
861   }
862 
863   /// Print OpenMP GPU kernels for testing.
864   void printKernels() const {
865     for (Function *F : SCC) {
866       if (!OMPInfoCache.Kernels.count(F))
867         continue;
868 
869       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
870         return ORA << "OpenMP GPU kernel "
871                    << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
872       };
873 
874       emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPGPU", Remark);
875     }
876   }
877 
878   /// Return the call if \p U is a callee use in a regular call. If \p RFI is
879   /// given it has to be the callee or a nullptr is returned.
880   static CallInst *getCallIfRegularCall(
881       Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
882     CallInst *CI = dyn_cast<CallInst>(U.getUser());
883     if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
884         (!RFI ||
885          (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
886       return CI;
887     return nullptr;
888   }
889 
890   /// Return the call if \p V is a regular call. If \p RFI is given it has to be
891   /// the callee or a nullptr is returned.
892   static CallInst *getCallIfRegularCall(
893       Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
894     CallInst *CI = dyn_cast<CallInst>(&V);
895     if (CI && !CI->hasOperandBundles() &&
896         (!RFI ||
897          (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
898       return CI;
899     return nullptr;
900   }
901 
902 private:
903   /// Merge parallel regions when it is safe.
904   bool mergeParallelRegions() {
905     const unsigned CallbackCalleeOperand = 2;
906     const unsigned CallbackFirstArgOperand = 3;
907     using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
908 
909     // Check if there are any __kmpc_fork_call calls to merge.
910     OMPInformationCache::RuntimeFunctionInfo &RFI =
911         OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];
912 
913     if (!RFI.Declaration)
914       return false;
915 
916     // Unmergable calls that prevent merging a parallel region.
917     OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = {
918         OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind],
919         OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads],
920     };
921 
922     bool Changed = false;
923     LoopInfo *LI = nullptr;
924     DominatorTree *DT = nullptr;
925 
926     SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap;
927 
928     BasicBlock *StartBB = nullptr, *EndBB = nullptr;
929     auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
930                          BasicBlock &ContinuationIP) {
931       BasicBlock *CGStartBB = CodeGenIP.getBlock();
932       BasicBlock *CGEndBB =
933           SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
934       assert(StartBB != nullptr && "StartBB should not be null");
935       CGStartBB->getTerminator()->setSuccessor(0, StartBB);
936       assert(EndBB != nullptr && "EndBB should not be null");
937       EndBB->getTerminator()->setSuccessor(0, CGEndBB);
938     };
939 
940     auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &,
941                       Value &Inner, Value *&ReplacementValue) -> InsertPointTy {
942       ReplacementValue = &Inner;
943       return CodeGenIP;
944     };
945 
946     auto FiniCB = [&](InsertPointTy CodeGenIP) {};
947 
948     /// Create a sequential execution region within a merged parallel region,
949     /// encapsulated in a master construct with a barrier for synchronization.
950     auto CreateSequentialRegion = [&](Function *OuterFn,
951                                       BasicBlock *OuterPredBB,
952                                       Instruction *SeqStartI,
953                                       Instruction *SeqEndI) {
954       // Isolate the instructions of the sequential region to a separate
955       // block.
956       BasicBlock *ParentBB = SeqStartI->getParent();
957       BasicBlock *SeqEndBB =
958           SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI);
959       BasicBlock *SeqAfterBB =
960           SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI);
961       BasicBlock *SeqStartBB =
962           SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged");
963 
964       assert(ParentBB->getUniqueSuccessor() == SeqStartBB &&
965              "Expected a different CFG");
966       const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
967       ParentBB->getTerminator()->eraseFromParent();
968 
969       auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
970                            BasicBlock &ContinuationIP) {
971         BasicBlock *CGStartBB = CodeGenIP.getBlock();
972         BasicBlock *CGEndBB =
973             SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
974         assert(SeqStartBB != nullptr && "SeqStartBB should not be null");
975         CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB);
976         assert(SeqEndBB != nullptr && "SeqEndBB should not be null");
977         SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB);
978       };
979       auto FiniCB = [&](InsertPointTy CodeGenIP) {};
980 
981       // Find outputs from the sequential region to outside users and
982       // broadcast their values to them.
983       for (Instruction &I : *SeqStartBB) {
984         SmallPtrSet<Instruction *, 4> OutsideUsers;
985         for (User *Usr : I.users()) {
986           Instruction &UsrI = *cast<Instruction>(Usr);
987           // Ignore outputs to LT intrinsics, code extraction for the merged
988           // parallel region will fix them.
989           if (UsrI.isLifetimeStartOrEnd())
990             continue;
991 
992           if (UsrI.getParent() != SeqStartBB)
993             OutsideUsers.insert(&UsrI);
994         }
995 
996         if (OutsideUsers.empty())
997           continue;
998 
999         // Emit an alloca in the outer region to store the broadcasted
1000         // value.
1001         const DataLayout &DL = M.getDataLayout();
1002         AllocaInst *AllocaI = new AllocaInst(
1003             I.getType(), DL.getAllocaAddrSpace(), nullptr,
1004             I.getName() + ".seq.output.alloc", &OuterFn->front().front());
1005 
1006         // Emit a store instruction in the sequential BB to update the
1007         // value.
1008         new StoreInst(&I, AllocaI, SeqStartBB->getTerminator());
1009 
1010         // Emit a load instruction and replace the use of the output value
1011         // with it.
1012         for (Instruction *UsrI : OutsideUsers) {
1013           LoadInst *LoadI = new LoadInst(
1014               I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI);
1015           UsrI->replaceUsesOfWith(&I, LoadI);
1016         }
1017       }
1018 
1019       OpenMPIRBuilder::LocationDescription Loc(
1020           InsertPointTy(ParentBB, ParentBB->end()), DL);
1021       InsertPointTy SeqAfterIP =
1022           OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB);
1023 
1024       OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel);
1025 
1026       BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock());
1027 
1028       LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn
1029                         << "\n");
1030     };
1031 
1032     // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all
1033     // contained in BB and only separated by instructions that can be
1034     // redundantly executed in parallel. The block BB is split before the first
1035     // call (in MergableCIs) and after the last so the entire region we merge
1036     // into a single parallel region is contained in a single basic block
1037     // without any other instructions. We use the OpenMPIRBuilder to outline
1038     // that block and call the resulting function via __kmpc_fork_call.
1039     auto Merge = [&](const SmallVectorImpl<CallInst *> &MergableCIs,
1040                      BasicBlock *BB) {
1041       // TODO: Change the interface to allow single CIs expanded, e.g, to
1042       // include an outer loop.
1043       assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");
1044 
1045       auto Remark = [&](OptimizationRemark OR) {
1046         OR << "Parallel region merged with parallel region"
1047            << (MergableCIs.size() > 2 ? "s" : "") << " at ";
1048         for (auto *CI : llvm::drop_begin(MergableCIs)) {
1049           OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc());
1050           if (CI != MergableCIs.back())
1051             OR << ", ";
1052         }
1053         return OR << ".";
1054       };
1055 
1056       emitRemark<OptimizationRemark>(MergableCIs.front(), "OMP150", Remark);
1057 
1058       Function *OriginalFn = BB->getParent();
1059       LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size()
1060                         << " parallel regions in " << OriginalFn->getName()
1061                         << "\n");
1062 
1063       // Isolate the calls to merge in a separate block.
1064       EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI);
1065       BasicBlock *AfterBB =
1066           SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI);
1067       StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr,
1068                            "omp.par.merged");
1069 
1070       assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG");
1071       const DebugLoc DL = BB->getTerminator()->getDebugLoc();
1072       BB->getTerminator()->eraseFromParent();
1073 
1074       // Create sequential regions for sequential instructions that are
1075       // in-between mergable parallel regions.
1076       for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1;
1077            It != End; ++It) {
1078         Instruction *ForkCI = *It;
1079         Instruction *NextForkCI = *(It + 1);
1080 
1081         // Continue if there are not in-between instructions.
1082         if (ForkCI->getNextNode() == NextForkCI)
1083           continue;
1084 
1085         CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
1086                                NextForkCI->getPrevNode());
1087       }
1088 
1089       OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
1090                                                DL);
1091       IRBuilder<>::InsertPoint AllocaIP(
1092           &OriginalFn->getEntryBlock(),
1093           OriginalFn->getEntryBlock().getFirstInsertionPt());
1094       // Create the merged parallel region with default proc binding, to
1095       // avoid overriding binding settings, and without explicit cancellation.
1096       InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
1097           Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
1098           OMP_PROC_BIND_default, /* IsCancellable */ false);
1099       BranchInst::Create(AfterBB, AfterIP.getBlock());
1100 
1101       // Perform the actual outlining.
1102       OMPInfoCache.OMPBuilder.finalize(OriginalFn);
1103 
1104       Function *OutlinedFn = MergableCIs.front()->getCaller();
1105 
1106       // Replace the __kmpc_fork_call calls with direct calls to the outlined
1107       // callbacks.
1108       SmallVector<Value *, 8> Args;
1109       for (auto *CI : MergableCIs) {
1110         Value *Callee =
1111             CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts();
1112         FunctionType *FT =
1113             cast<FunctionType>(Callee->getType()->getPointerElementType());
1114         Args.clear();
1115         Args.push_back(OutlinedFn->getArg(0));
1116         Args.push_back(OutlinedFn->getArg(1));
1117         for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
1118              ++U)
1119           Args.push_back(CI->getArgOperand(U));
1120 
1121         CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
1122         if (CI->getDebugLoc())
1123           NewCI->setDebugLoc(CI->getDebugLoc());
1124 
1125         // Forward parameter attributes from the callback to the callee.
1126         for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
1127              ++U)
1128           for (const Attribute &A : CI->getAttributes().getParamAttrs(U))
1129             NewCI->addParamAttr(
1130                 U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);
1131 
1132         // Emit an explicit barrier to replace the implicit fork-join barrier.
1133         if (CI != MergableCIs.back()) {
1134           // TODO: Remove barrier if the merged parallel region includes the
1135           // 'nowait' clause.
1136           OMPInfoCache.OMPBuilder.createBarrier(
1137               InsertPointTy(NewCI->getParent(),
1138                             NewCI->getNextNode()->getIterator()),
1139               OMPD_parallel);
1140         }
1141 
1142         CI->eraseFromParent();
1143       }
1144 
1145       assert(OutlinedFn != OriginalFn && "Outlining failed");
1146       CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
1147       CGUpdater.reanalyzeFunction(*OriginalFn);
1148 
1149       NumOpenMPParallelRegionsMerged += MergableCIs.size();
1150 
1151       return true;
1152     };
1153 
1154     // Helper function that identifes sequences of
1155     // __kmpc_fork_call uses in a basic block.
1156     auto DetectPRsCB = [&](Use &U, Function &F) {
1157       CallInst *CI = getCallIfRegularCall(U, &RFI);
1158       BB2PRMap[CI->getParent()].insert(CI);
1159 
1160       return false;
1161     };
1162 
1163     BB2PRMap.clear();
1164     RFI.foreachUse(SCC, DetectPRsCB);
1165     SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector;
1166     // Find mergable parallel regions within a basic block that are
1167     // safe to merge, that is any in-between instructions can safely
1168     // execute in parallel after merging.
1169     // TODO: support merging across basic-blocks.
1170     for (auto &It : BB2PRMap) {
1171       auto &CIs = It.getSecond();
1172       if (CIs.size() < 2)
1173         continue;
1174 
1175       BasicBlock *BB = It.getFirst();
1176       SmallVector<CallInst *, 4> MergableCIs;
1177 
1178       /// Returns true if the instruction is mergable, false otherwise.
1179       /// A terminator instruction is unmergable by definition since merging
1180       /// works within a BB. Instructions before the mergable region are
1181       /// mergable if they are not calls to OpenMP runtime functions that may
1182       /// set different execution parameters for subsequent parallel regions.
1183       /// Instructions in-between parallel regions are mergable if they are not
1184       /// calls to any non-intrinsic function since that may call a non-mergable
1185       /// OpenMP runtime function.
1186       auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) {
1187         // We do not merge across BBs, hence return false (unmergable) if the
1188         // instruction is a terminator.
1189         if (I.isTerminator())
1190           return false;
1191 
1192         if (!isa<CallInst>(&I))
1193           return true;
1194 
1195         CallInst *CI = cast<CallInst>(&I);
1196         if (IsBeforeMergableRegion) {
1197           Function *CalledFunction = CI->getCalledFunction();
1198           if (!CalledFunction)
1199             return false;
1200           // Return false (unmergable) if the call before the parallel
1201           // region calls an explicit affinity (proc_bind) or number of
1202           // threads (num_threads) compiler-generated function. Those settings
1203           // may be incompatible with following parallel regions.
1204           // TODO: ICV tracking to detect compatibility.
1205           for (const auto &RFI : UnmergableCallsInfo) {
1206             if (CalledFunction == RFI.Declaration)
1207               return false;
1208           }
1209         } else {
1210           // Return false (unmergable) if there is a call instruction
1211           // in-between parallel regions when it is not an intrinsic. It
1212           // may call an unmergable OpenMP runtime function in its callpath.
1213           // TODO: Keep track of possible OpenMP calls in the callpath.
1214           if (!isa<IntrinsicInst>(CI))
1215             return false;
1216         }
1217 
1218         return true;
1219       };
1220       // Find maximal number of parallel region CIs that are safe to merge.
1221       for (auto It = BB->begin(), End = BB->end(); It != End;) {
1222         Instruction &I = *It;
1223         ++It;
1224 
1225         if (CIs.count(&I)) {
1226           MergableCIs.push_back(cast<CallInst>(&I));
1227           continue;
1228         }
1229 
1230         // Continue expanding if the instruction is mergable.
1231         if (IsMergable(I, MergableCIs.empty()))
1232           continue;
1233 
1234         // Forward the instruction iterator to skip the next parallel region
1235         // since there is an unmergable instruction which can affect it.
1236         for (; It != End; ++It) {
1237           Instruction &SkipI = *It;
1238           if (CIs.count(&SkipI)) {
1239             LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI
1240                               << " due to " << I << "\n");
1241             ++It;
1242             break;
1243           }
1244         }
1245 
1246         // Store mergable regions found.
1247         if (MergableCIs.size() > 1) {
1248           MergableCIsVector.push_back(MergableCIs);
1249           LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size()
1250                             << " parallel regions in block " << BB->getName()
1251                             << " of function " << BB->getParent()->getName()
1252                             << "\n";);
1253         }
1254 
1255         MergableCIs.clear();
1256       }
1257 
1258       if (!MergableCIsVector.empty()) {
1259         Changed = true;
1260 
1261         for (auto &MergableCIs : MergableCIsVector)
1262           Merge(MergableCIs, BB);
1263         MergableCIsVector.clear();
1264       }
1265     }
1266 
1267     if (Changed) {
1268       /// Re-collect use for fork calls, emitted barrier calls, and
1269       /// any emitted master/end_master calls.
1270       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
1271       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
1272       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
1273       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
1274     }
1275 
1276     return Changed;
1277   }
1278 
1279   /// Try to delete parallel regions if possible.
1280   bool deleteParallelRegions() {
1281     const unsigned CallbackCalleeOperand = 2;
1282 
1283     OMPInformationCache::RuntimeFunctionInfo &RFI =
1284         OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];
1285 
1286     if (!RFI.Declaration)
1287       return false;
1288 
1289     bool Changed = false;
1290     auto DeleteCallCB = [&](Use &U, Function &) {
1291       CallInst *CI = getCallIfRegularCall(U);
1292       if (!CI)
1293         return false;
1294       auto *Fn = dyn_cast<Function>(
1295           CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
1296       if (!Fn)
1297         return false;
1298       if (!Fn->onlyReadsMemory())
1299         return false;
1300       if (!Fn->hasFnAttribute(Attribute::WillReturn))
1301         return false;
1302 
1303       LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
1304                         << CI->getCaller()->getName() << "\n");
1305 
1306       auto Remark = [&](OptimizationRemark OR) {
1307         return OR << "Removing parallel region with no side-effects.";
1308       };
1309       emitRemark<OptimizationRemark>(CI, "OMP160", Remark);
1310 
1311       CGUpdater.removeCallSite(*CI);
1312       CI->eraseFromParent();
1313       Changed = true;
1314       ++NumOpenMPParallelRegionsDeleted;
1315       return true;
1316     };
1317 
1318     RFI.foreachUse(SCC, DeleteCallCB);
1319 
1320     return Changed;
1321   }
1322 
1323   /// Try to eliminate runtime calls by reusing existing ones.
1324   bool deduplicateRuntimeCalls() {
1325     bool Changed = false;
1326 
1327     RuntimeFunction DeduplicableRuntimeCallIDs[] = {
1328         OMPRTL_omp_get_num_threads,
1329         OMPRTL_omp_in_parallel,
1330         OMPRTL_omp_get_cancellation,
1331         OMPRTL_omp_get_thread_limit,
1332         OMPRTL_omp_get_supported_active_levels,
1333         OMPRTL_omp_get_level,
1334         OMPRTL_omp_get_ancestor_thread_num,
1335         OMPRTL_omp_get_team_size,
1336         OMPRTL_omp_get_active_level,
1337         OMPRTL_omp_in_final,
1338         OMPRTL_omp_get_proc_bind,
1339         OMPRTL_omp_get_num_places,
1340         OMPRTL_omp_get_num_procs,
1341         OMPRTL_omp_get_place_num,
1342         OMPRTL_omp_get_partition_num_places,
1343         OMPRTL_omp_get_partition_place_nums};
1344 
1345     // Global-tid is handled separately.
1346     SmallSetVector<Value *, 16> GTIdArgs;
1347     collectGlobalThreadIdArguments(GTIdArgs);
1348     LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
1349                       << " global thread ID arguments\n");
1350 
1351     for (Function *F : SCC) {
1352       for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
1353         Changed |= deduplicateRuntimeCalls(
1354             *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);
1355 
1356       // __kmpc_global_thread_num is special as we can replace it with an
1357       // argument in enough cases to make it worth trying.
1358       Value *GTIdArg = nullptr;
1359       for (Argument &Arg : F->args())
1360         if (GTIdArgs.count(&Arg)) {
1361           GTIdArg = &Arg;
1362           break;
1363         }
1364       Changed |= deduplicateRuntimeCalls(
1365           *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
1366     }
1367 
1368     return Changed;
1369   }
1370 
1371   /// Tries to hide the latency of runtime calls that involve host to
1372   /// device memory transfers by splitting them into their "issue" and "wait"
1373   /// versions. The "issue" is moved upwards as much as possible. The "wait" is
1374   /// moved downards as much as possible. The "issue" issues the memory transfer
1375   /// asynchronously, returning a handle. The "wait" waits in the returned
1376   /// handle for the memory transfer to finish.
1377   bool hideMemTransfersLatency() {
1378     auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
1379     bool Changed = false;
1380     auto SplitMemTransfers = [&](Use &U, Function &Decl) {
1381       auto *RTCall = getCallIfRegularCall(U, &RFI);
1382       if (!RTCall)
1383         return false;
1384 
1385       OffloadArray OffloadArrays[3];
1386       if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
1387         return false;
1388 
1389       LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));
1390 
1391       // TODO: Check if can be moved upwards.
1392       bool WasSplit = false;
1393       Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
1394       if (WaitMovementPoint)
1395         WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);
1396 
1397       Changed |= WasSplit;
1398       return WasSplit;
1399     };
1400     RFI.foreachUse(SCC, SplitMemTransfers);
1401 
1402     return Changed;
1403   }
1404 
1405   /// Eliminates redundant, aligned barriers in OpenMP offloaded kernels.
1406   /// TODO: Make this an AA and expand it to work across blocks and functions.
1407   bool eliminateBarriers() {
1408     bool Changed = false;
1409 
1410     if (DisableOpenMPOptBarrierElimination)
1411       return /*Changed=*/false;
1412 
1413     if (OMPInfoCache.Kernels.empty())
1414       return /*Changed=*/false;
1415 
1416     enum ImplicitBarrierType { IBT_ENTRY, IBT_EXIT };
1417 
1418     class BarrierInfo {
1419       Instruction *I;
1420       enum ImplicitBarrierType Type;
1421 
1422     public:
1423       BarrierInfo(enum ImplicitBarrierType Type) : I(nullptr), Type(Type) {}
1424       BarrierInfo(Instruction &I) : I(&I) {}
1425 
1426       bool isImplicit() { return !I; }
1427 
1428       bool isImplicitEntry() { return isImplicit() && Type == IBT_ENTRY; }
1429 
1430       bool isImplicitExit() { return isImplicit() && Type == IBT_EXIT; }
1431 
1432       Instruction *getInstruction() { return I; }
1433     };
1434 
1435     for (Function *Kernel : OMPInfoCache.Kernels) {
1436       for (BasicBlock &BB : *Kernel) {
1437         SmallVector<BarrierInfo, 8> BarriersInBlock;
1438         SmallPtrSet<Instruction *, 8> BarriersToBeDeleted;
1439 
1440         // Add the kernel entry implicit barrier.
1441         if (&Kernel->getEntryBlock() == &BB)
1442           BarriersInBlock.push_back(IBT_ENTRY);
1443 
1444         // Find implicit and explicit aligned barriers in the same basic block.
1445         for (Instruction &I : BB) {
1446           if (isa<ReturnInst>(I)) {
1447             // Add the implicit barrier when exiting the kernel.
1448             BarriersInBlock.push_back(IBT_EXIT);
1449             continue;
1450           }
1451           CallBase *CB = dyn_cast<CallBase>(&I);
1452           if (!CB)
1453             continue;
1454 
1455           auto IsAlignBarrierCB = [&](CallBase &CB) {
1456             switch (CB.getIntrinsicID()) {
1457             case Intrinsic::nvvm_barrier0:
1458             case Intrinsic::nvvm_barrier0_and:
1459             case Intrinsic::nvvm_barrier0_or:
1460             case Intrinsic::nvvm_barrier0_popc:
1461               return true;
1462             default:
1463               break;
1464             }
1465             return hasAssumption(CB,
1466                                  KnownAssumptionString("ompx_aligned_barrier"));
1467           };
1468 
1469           if (IsAlignBarrierCB(*CB)) {
1470             // Add an explicit aligned barrier.
1471             BarriersInBlock.push_back(I);
1472           }
1473         }
1474 
1475         if (BarriersInBlock.size() <= 1)
1476           continue;
1477 
1478         // A barrier in a barrier pair is removeable if all instructions
1479         // between the barriers in the pair are side-effect free modulo the
1480         // barrier operation.
1481         auto IsBarrierRemoveable = [&Kernel](BarrierInfo *StartBI,
1482                                              BarrierInfo *EndBI) {
1483           assert(
1484               !StartBI->isImplicitExit() &&
1485               "Expected start barrier to be other than a kernel exit barrier");
1486           assert(
1487               !EndBI->isImplicitEntry() &&
1488               "Expected end barrier to be other than a kernel entry barrier");
1489           // If StarBI instructions is null then this the implicit
1490           // kernel entry barrier, so iterate from the first instruction in the
1491           // entry block.
1492           Instruction *I = (StartBI->isImplicitEntry())
1493                                ? &Kernel->getEntryBlock().front()
1494                                : StartBI->getInstruction()->getNextNode();
1495           assert(I && "Expected non-null start instruction");
1496           Instruction *E = (EndBI->isImplicitExit())
1497                                ? I->getParent()->getTerminator()
1498                                : EndBI->getInstruction();
1499           assert(E && "Expected non-null end instruction");
1500 
1501           for (; I != E; I = I->getNextNode()) {
1502             if (!I->mayHaveSideEffects() && !I->mayReadFromMemory())
1503               continue;
1504 
1505             auto IsPotentiallyAffectedByBarrier =
1506                 [](Optional<MemoryLocation> Loc) {
1507                   const Value *Obj = (Loc && Loc->Ptr)
1508                                          ? getUnderlyingObject(Loc->Ptr)
1509                                          : nullptr;
1510                   if (!Obj) {
1511                     LLVM_DEBUG(
1512                         dbgs()
1513                         << "Access to unknown location requires barriers\n");
1514                     return true;
1515                   }
1516                   if (isa<UndefValue>(Obj))
1517                     return false;
1518                   if (isa<AllocaInst>(Obj))
1519                     return false;
1520                   if (auto *GV = dyn_cast<GlobalVariable>(Obj)) {
1521                     if (GV->isConstant())
1522                       return false;
1523                     if (GV->isThreadLocal())
1524                       return false;
1525                     if (GV->getAddressSpace() == (int)AddressSpace::Local)
1526                       return false;
1527                     if (GV->getAddressSpace() == (int)AddressSpace::Constant)
1528                       return false;
1529                   }
1530                   LLVM_DEBUG(dbgs() << "Access to '" << *Obj
1531                                     << "' requires barriers\n");
1532                   return true;
1533                 };
1534 
1535             if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
1536               Optional<MemoryLocation> Loc = MemoryLocation::getForDest(MI);
1537               if (IsPotentiallyAffectedByBarrier(Loc))
1538                 return false;
1539               if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
1540                 Optional<MemoryLocation> Loc =
1541                     MemoryLocation::getForSource(MTI);
1542                 if (IsPotentiallyAffectedByBarrier(Loc))
1543                   return false;
1544               }
1545               continue;
1546             }
1547 
1548             if (auto *LI = dyn_cast<LoadInst>(I))
1549               if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1550                 continue;
1551 
1552             Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
1553             if (IsPotentiallyAffectedByBarrier(Loc))
1554               return false;
1555           }
1556 
1557           return true;
1558         };
1559 
1560         // Iterate barrier pairs and remove an explicit barrier if analysis
1561         // deems it removeable.
1562         for (auto *It = BarriersInBlock.begin(),
1563                   *End = BarriersInBlock.end() - 1;
1564              It != End; ++It) {
1565 
1566           BarrierInfo *StartBI = It;
1567           BarrierInfo *EndBI = (It + 1);
1568 
1569           // Cannot remove when both are implicit barriers, continue.
1570           if (StartBI->isImplicit() && EndBI->isImplicit())
1571             continue;
1572 
1573           if (!IsBarrierRemoveable(StartBI, EndBI))
1574             continue;
1575 
1576           assert(!(StartBI->isImplicit() && EndBI->isImplicit()) &&
1577                  "Expected at least one explicit barrier to remove.");
1578 
1579           // Remove an explicit barrier, check first, then second.
1580           if (!StartBI->isImplicit()) {
1581             LLVM_DEBUG(dbgs() << "Remove start barrier "
1582                               << *StartBI->getInstruction() << "\n");
1583             BarriersToBeDeleted.insert(StartBI->getInstruction());
1584           } else {
1585             LLVM_DEBUG(dbgs() << "Remove end barrier "
1586                               << *EndBI->getInstruction() << "\n");
1587             BarriersToBeDeleted.insert(EndBI->getInstruction());
1588           }
1589         }
1590 
1591         if (BarriersToBeDeleted.empty())
1592           continue;
1593 
1594         Changed = true;
1595         for (Instruction *I : BarriersToBeDeleted) {
1596           ++NumBarriersEliminated;
1597           auto Remark = [&](OptimizationRemark OR) {
1598             return OR << "Redundant barrier eliminated.";
1599           };
1600 
1601           if (EnableVerboseRemarks)
1602             emitRemark<OptimizationRemark>(I, "OMP190", Remark);
1603           I->eraseFromParent();
1604         }
1605       }
1606     }
1607 
1608     return Changed;
1609   }
1610 
1611   void analysisGlobalization() {
1612     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
1613 
1614     auto CheckGlobalization = [&](Use &U, Function &Decl) {
1615       if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
1616         auto Remark = [&](OptimizationRemarkMissed ORM) {
1617           return ORM
1618                  << "Found thread data sharing on the GPU. "
1619                  << "Expect degraded performance due to data globalization.";
1620         };
1621         emitRemark<OptimizationRemarkMissed>(CI, "OMP112", Remark);
1622       }
1623 
1624       return false;
1625     };
1626 
1627     RFI.foreachUse(SCC, CheckGlobalization);
1628   }
1629 
1630   /// Maps the values stored in the offload arrays passed as arguments to
1631   /// \p RuntimeCall into the offload arrays in \p OAs.
1632   bool getValuesInOffloadArrays(CallInst &RuntimeCall,
1633                                 MutableArrayRef<OffloadArray> OAs) {
1634     assert(OAs.size() == 3 && "Need space for three offload arrays!");
1635 
1636     // A runtime call that involves memory offloading looks something like:
1637     // call void @__tgt_target_data_begin_mapper(arg0, arg1,
1638     //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
1639     // ...)
1640     // So, the idea is to access the allocas that allocate space for these
1641     // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
1642     // Therefore:
1643     // i8** %offload_baseptrs.
1644     Value *BasePtrsArg =
1645         RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
1646     // i8** %offload_ptrs.
1647     Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
1648     // i8** %offload_sizes.
1649     Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);
1650 
1651     // Get values stored in **offload_baseptrs.
1652     auto *V = getUnderlyingObject(BasePtrsArg);
1653     if (!isa<AllocaInst>(V))
1654       return false;
1655     auto *BasePtrsArray = cast<AllocaInst>(V);
1656     if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
1657       return false;
1658 
1659     // Get values stored in **offload_baseptrs.
1660     V = getUnderlyingObject(PtrsArg);
1661     if (!isa<AllocaInst>(V))
1662       return false;
1663     auto *PtrsArray = cast<AllocaInst>(V);
1664     if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
1665       return false;
1666 
1667     // Get values stored in **offload_sizes.
1668     V = getUnderlyingObject(SizesArg);
1669     // If it's a [constant] global array don't analyze it.
1670     if (isa<GlobalValue>(V))
1671       return isa<Constant>(V);
1672     if (!isa<AllocaInst>(V))
1673       return false;
1674 
1675     auto *SizesArray = cast<AllocaInst>(V);
1676     if (!OAs[2].initialize(*SizesArray, RuntimeCall))
1677       return false;
1678 
1679     return true;
1680   }
1681 
1682   /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
1683   /// For now this is a way to test that the function getValuesInOffloadArrays
1684   /// is working properly.
1685   /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
1686   void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
1687     assert(OAs.size() == 3 && "There are three offload arrays to debug!");
1688 
1689     LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
1690     std::string ValuesStr;
1691     raw_string_ostream Printer(ValuesStr);
1692     std::string Separator = " --- ";
1693 
1694     for (auto *BP : OAs[0].StoredValues) {
1695       BP->print(Printer);
1696       Printer << Separator;
1697     }
1698     LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
1699     ValuesStr.clear();
1700 
1701     for (auto *P : OAs[1].StoredValues) {
1702       P->print(Printer);
1703       Printer << Separator;
1704     }
1705     LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
1706     ValuesStr.clear();
1707 
1708     for (auto *S : OAs[2].StoredValues) {
1709       S->print(Printer);
1710       Printer << Separator;
1711     }
1712     LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
1713   }
1714 
1715   /// Returns the instruction where the "wait" counterpart \p RuntimeCall can be
1716   /// moved. Returns nullptr if the movement is not possible, or not worth it.
1717   Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
1718     // FIXME: This traverses only the BasicBlock where RuntimeCall is.
1719     //  Make it traverse the CFG.
1720 
1721     Instruction *CurrentI = &RuntimeCall;
1722     bool IsWorthIt = false;
1723     while ((CurrentI = CurrentI->getNextNode())) {
1724 
1725       // TODO: Once we detect the regions to be offloaded we should use the
1726       //  alias analysis manager to check if CurrentI may modify one of
1727       //  the offloaded regions.
1728       if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
1729         if (IsWorthIt)
1730           return CurrentI;
1731 
1732         return nullptr;
1733       }
1734 
1735       // FIXME: For now if we move it over anything without side effect
1736       //  is worth it.
1737       IsWorthIt = true;
1738     }
1739 
1740     // Return end of BasicBlock.
1741     return RuntimeCall.getParent()->getTerminator();
1742   }
1743 
1744   /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
1745   bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
1746                                Instruction &WaitMovementPoint) {
1747     // Create stack allocated handle (__tgt_async_info) at the beginning of the
1748     // function. Used for storing information of the async transfer, allowing to
1749     // wait on it later.
1750     auto &IRBuilder = OMPInfoCache.OMPBuilder;
1751     auto *F = RuntimeCall.getCaller();
1752     Instruction *FirstInst = &(F->getEntryBlock().front());
1753     AllocaInst *Handle = new AllocaInst(
1754         IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst);
1755 
1756     // Add "issue" runtime call declaration:
1757     // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
1758     //   i8**, i8**, i64*, i64*)
1759     FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
1760         M, OMPRTL___tgt_target_data_begin_mapper_issue);
1761 
1762     // Change RuntimeCall call site for its asynchronous version.
1763     SmallVector<Value *, 16> Args;
1764     for (auto &Arg : RuntimeCall.args())
1765       Args.push_back(Arg.get());
1766     Args.push_back(Handle);
1767 
1768     CallInst *IssueCallsite =
1769         CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
1770     OMPInfoCache.setCallingConvention(IssueDecl, IssueCallsite);
1771     RuntimeCall.eraseFromParent();
1772 
1773     // Add "wait" runtime call declaration:
1774     // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
1775     FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
1776         M, OMPRTL___tgt_target_data_begin_mapper_wait);
1777 
1778     Value *WaitParams[2] = {
1779         IssueCallsite->getArgOperand(
1780             OffloadArray::DeviceIDArgNum), // device_id.
1781         Handle                             // handle to wait on.
1782     };
1783     CallInst *WaitCallsite = CallInst::Create(
1784         WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
1785     OMPInfoCache.setCallingConvention(WaitDecl, WaitCallsite);
1786 
1787     return true;
1788   }
1789 
1790   static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
1791                                     bool GlobalOnly, bool &SingleChoice) {
1792     if (CurrentIdent == NextIdent)
1793       return CurrentIdent;
1794 
1795     // TODO: Figure out how to actually combine multiple debug locations. For
1796     //       now we just keep an existing one if there is a single choice.
1797     if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
1798       SingleChoice = !CurrentIdent;
1799       return NextIdent;
1800     }
1801     return nullptr;
1802   }
1803 
1804   /// Return an `struct ident_t*` value that represents the ones used in the
1805   /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
1806   /// return a local `struct ident_t*`. For now, if we cannot find a suitable
1807   /// return value we create one from scratch. We also do not yet combine
1808   /// information, e.g., the source locations, see combinedIdentStruct.
1809   Value *
1810   getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
1811                                  Function &F, bool GlobalOnly) {
1812     bool SingleChoice = true;
1813     Value *Ident = nullptr;
1814     auto CombineIdentStruct = [&](Use &U, Function &Caller) {
1815       CallInst *CI = getCallIfRegularCall(U, &RFI);
1816       if (!CI || &F != &Caller)
1817         return false;
1818       Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
1819                                   /* GlobalOnly */ true, SingleChoice);
1820       return false;
1821     };
1822     RFI.foreachUse(SCC, CombineIdentStruct);
1823 
1824     if (!Ident || !SingleChoice) {
1825       // The IRBuilder uses the insertion block to get to the module, this is
1826       // unfortunate but we work around it for now.
1827       if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
1828         OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
1829             &F.getEntryBlock(), F.getEntryBlock().begin()));
1830       // Create a fallback location if non was found.
1831       // TODO: Use the debug locations of the calls instead.
1832       uint32_t SrcLocStrSize;
1833       Constant *Loc =
1834           OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
1835       Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc, SrcLocStrSize);
1836     }
1837     return Ident;
1838   }
1839 
1840   /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
1841   /// \p ReplVal if given.
1842   bool deduplicateRuntimeCalls(Function &F,
1843                                OMPInformationCache::RuntimeFunctionInfo &RFI,
1844                                Value *ReplVal = nullptr) {
1845     auto *UV = RFI.getUseVector(F);
1846     if (!UV || UV->size() + (ReplVal != nullptr) < 2)
1847       return false;
1848 
1849     LLVM_DEBUG(
1850         dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
1851                << (ReplVal ? " with an existing value\n" : "\n") << "\n");
1852 
1853     assert((!ReplVal || (isa<Argument>(ReplVal) &&
1854                          cast<Argument>(ReplVal)->getParent() == &F)) &&
1855            "Unexpected replacement value!");
1856 
1857     // TODO: Use dominance to find a good position instead.
1858     auto CanBeMoved = [this](CallBase &CB) {
1859       unsigned NumArgs = CB.arg_size();
1860       if (NumArgs == 0)
1861         return true;
1862       if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
1863         return false;
1864       for (unsigned U = 1; U < NumArgs; ++U)
1865         if (isa<Instruction>(CB.getArgOperand(U)))
1866           return false;
1867       return true;
1868     };
1869 
1870     if (!ReplVal) {
1871       for (Use *U : *UV)
1872         if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
1873           if (!CanBeMoved(*CI))
1874             continue;
1875 
1876           // If the function is a kernel, dedup will move
1877           // the runtime call right after the kernel init callsite. Otherwise,
1878           // it will move it to the beginning of the caller function.
1879           if (isKernel(F)) {
1880             auto &KernelInitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
1881             auto *KernelInitUV = KernelInitRFI.getUseVector(F);
1882 
1883             if (KernelInitUV->empty())
1884               continue;
1885 
1886             assert(KernelInitUV->size() == 1 &&
1887                    "Expected a single __kmpc_target_init in kernel\n");
1888 
1889             CallInst *KernelInitCI =
1890                 getCallIfRegularCall(*KernelInitUV->front(), &KernelInitRFI);
1891             assert(KernelInitCI &&
1892                    "Expected a call to __kmpc_target_init in kernel\n");
1893 
1894             CI->moveAfter(KernelInitCI);
1895           } else
1896             CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
1897           ReplVal = CI;
1898           break;
1899         }
1900       if (!ReplVal)
1901         return false;
1902     }
1903 
1904     // If we use a call as a replacement value we need to make sure the ident is
1905     // valid at the new location. For now we just pick a global one, either
1906     // existing and used by one of the calls, or created from scratch.
1907     if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
1908       if (!CI->arg_empty() &&
1909           CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
1910         Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
1911                                                       /* GlobalOnly */ true);
1912         CI->setArgOperand(0, Ident);
1913       }
1914     }
1915 
1916     bool Changed = false;
1917     auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
1918       CallInst *CI = getCallIfRegularCall(U, &RFI);
1919       if (!CI || CI == ReplVal || &F != &Caller)
1920         return false;
1921       assert(CI->getCaller() == &F && "Unexpected call!");
1922 
1923       auto Remark = [&](OptimizationRemark OR) {
1924         return OR << "OpenMP runtime call "
1925                   << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated.";
1926       };
1927       if (CI->getDebugLoc())
1928         emitRemark<OptimizationRemark>(CI, "OMP170", Remark);
1929       else
1930         emitRemark<OptimizationRemark>(&F, "OMP170", Remark);
1931 
1932       CGUpdater.removeCallSite(*CI);
1933       CI->replaceAllUsesWith(ReplVal);
1934       CI->eraseFromParent();
1935       ++NumOpenMPRuntimeCallsDeduplicated;
1936       Changed = true;
1937       return true;
1938     };
1939     RFI.foreachUse(SCC, ReplaceAndDeleteCB);
1940 
1941     return Changed;
1942   }
1943 
1944   /// Collect arguments that represent the global thread id in \p GTIdArgs.
1945   void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
1946     // TODO: Below we basically perform a fixpoint iteration with a pessimistic
1947     //       initialization. We could define an AbstractAttribute instead and
1948     //       run the Attributor here once it can be run as an SCC pass.
1949 
1950     // Helper to check the argument \p ArgNo at all call sites of \p F for
1951     // a GTId.
1952     auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
1953       if (!F.hasLocalLinkage())
1954         return false;
1955       for (Use &U : F.uses()) {
1956         if (CallInst *CI = getCallIfRegularCall(U)) {
1957           Value *ArgOp = CI->getArgOperand(ArgNo);
1958           if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
1959               getCallIfRegularCall(
1960                   *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
1961             continue;
1962         }
1963         return false;
1964       }
1965       return true;
1966     };
1967 
1968     // Helper to identify uses of a GTId as GTId arguments.
1969     auto AddUserArgs = [&](Value &GTId) {
1970       for (Use &U : GTId.uses())
1971         if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
1972           if (CI->isArgOperand(&U))
1973             if (Function *Callee = CI->getCalledFunction())
1974               if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
1975                 GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
1976     };
1977 
1978     // The argument users of __kmpc_global_thread_num calls are GTIds.
1979     OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
1980         OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];
1981 
1982     GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
1983       if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
1984         AddUserArgs(*CI);
1985       return false;
1986     });
1987 
1988     // Transitively search for more arguments by looking at the users of the
1989     // ones we know already. During the search the GTIdArgs vector is extended
1990     // so we cannot cache the size nor can we use a range based for.
1991     for (unsigned U = 0; U < GTIdArgs.size(); ++U)
1992       AddUserArgs(*GTIdArgs[U]);
1993   }
1994 
1995   /// Kernel (=GPU) optimizations and utility functions
1996   ///
1997   ///{{
1998 
1999   /// Check if \p F is a kernel, hence entry point for target offloading.
2000   bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }
2001 
2002   /// Cache to remember the unique kernel for a function.
2003   DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;
2004 
2005   /// Find the unique kernel that will execute \p F, if any.
2006   Kernel getUniqueKernelFor(Function &F);
2007 
2008   /// Find the unique kernel that will execute \p I, if any.
2009   Kernel getUniqueKernelFor(Instruction &I) {
2010     return getUniqueKernelFor(*I.getFunction());
2011   }
2012 
2013   /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in
2014   /// the cases we can avoid taking the address of a function.
2015   bool rewriteDeviceCodeStateMachine();
2016 
2017   ///
2018   ///}}
2019 
2020   /// Emit a remark generically
2021   ///
2022   /// This template function can be used to generically emit a remark. The
2023   /// RemarkKind should be one of the following:
2024   ///   - OptimizationRemark to indicate a successful optimization attempt
2025   ///   - OptimizationRemarkMissed to report a failed optimization attempt
2026   ///   - OptimizationRemarkAnalysis to provide additional information about an
2027   ///     optimization attempt
2028   ///
2029   /// The remark is built using a callback function provided by the caller that
2030   /// takes a RemarkKind as input and returns a RemarkKind.
2031   template <typename RemarkKind, typename RemarkCallBack>
2032   void emitRemark(Instruction *I, StringRef RemarkName,
2033                   RemarkCallBack &&RemarkCB) const {
2034     Function *F = I->getParent()->getParent();
2035     auto &ORE = OREGetter(F);
2036 
2037     if (RemarkName.startswith("OMP"))
2038       ORE.emit([&]() {
2039         return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I))
2040                << " [" << RemarkName << "]";
2041       });
2042     else
2043       ORE.emit(
2044           [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); });
2045   }
2046 
2047   /// Emit a remark on a function.
2048   template <typename RemarkKind, typename RemarkCallBack>
2049   void emitRemark(Function *F, StringRef RemarkName,
2050                   RemarkCallBack &&RemarkCB) const {
2051     auto &ORE = OREGetter(F);
2052 
2053     if (RemarkName.startswith("OMP"))
2054       ORE.emit([&]() {
2055         return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F))
2056                << " [" << RemarkName << "]";
2057       });
2058     else
2059       ORE.emit(
2060           [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); });
2061   }
2062 
2063   /// RAII struct to temporarily change an RTL function's linkage to external.
2064   /// This prevents it from being mistakenly removed by other optimizations.
2065   struct ExternalizationRAII {
2066     ExternalizationRAII(OMPInformationCache &OMPInfoCache,
2067                         RuntimeFunction RFKind)
2068         : Declaration(OMPInfoCache.RFIs[RFKind].Declaration) {
2069       if (!Declaration)
2070         return;
2071 
2072       LinkageType = Declaration->getLinkage();
2073       Declaration->setLinkage(GlobalValue::ExternalLinkage);
2074     }
2075 
2076     ~ExternalizationRAII() {
2077       if (!Declaration)
2078         return;
2079 
2080       Declaration->setLinkage(LinkageType);
2081     }
2082 
2083     Function *Declaration;
2084     GlobalValue::LinkageTypes LinkageType;
2085   };
2086 
2087   /// The underlying module.
2088   Module &M;
2089 
2090   /// The SCC we are operating on.
2091   SmallVectorImpl<Function *> &SCC;
2092 
2093   /// Callback to update the call graph, the first argument is a removed call,
2094   /// the second an optional replacement call.
2095   CallGraphUpdater &CGUpdater;
2096 
2097   /// Callback to get an OptimizationRemarkEmitter from a Function *
2098   OptimizationRemarkGetter OREGetter;
2099 
2100   /// OpenMP-specific information cache. Also Used for Attributor runs.
2101   OMPInformationCache &OMPInfoCache;
2102 
2103   /// Attributor instance.
2104   Attributor &A;
2105 
2106   /// Helper function to run Attributor on SCC.
2107   bool runAttributor(bool IsModulePass) {
2108     if (SCC.empty())
2109       return false;
2110 
2111     // Temporarily make these function have external linkage so the Attributor
2112     // doesn't remove them when we try to look them up later.
2113     ExternalizationRAII Parallel(OMPInfoCache, OMPRTL___kmpc_kernel_parallel);
2114     ExternalizationRAII EndParallel(OMPInfoCache,
2115                                     OMPRTL___kmpc_kernel_end_parallel);
2116     ExternalizationRAII BarrierSPMD(OMPInfoCache,
2117                                     OMPRTL___kmpc_barrier_simple_spmd);
2118     ExternalizationRAII BarrierGeneric(OMPInfoCache,
2119                                        OMPRTL___kmpc_barrier_simple_generic);
2120     ExternalizationRAII ThreadId(OMPInfoCache,
2121                                  OMPRTL___kmpc_get_hardware_thread_id_in_block);
2122     ExternalizationRAII NumThreads(
2123         OMPInfoCache, OMPRTL___kmpc_get_hardware_num_threads_in_block);
2124     ExternalizationRAII WarpSize(OMPInfoCache, OMPRTL___kmpc_get_warp_size);
2125 
2126     registerAAs(IsModulePass);
2127 
2128     ChangeStatus Changed = A.run();
2129 
2130     LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
2131                       << " functions, result: " << Changed << ".\n");
2132 
2133     return Changed == ChangeStatus::CHANGED;
2134   }
2135 
2136   void registerFoldRuntimeCall(RuntimeFunction RF);
2137 
2138   /// Populate the Attributor with abstract attribute opportunities in the
2139   /// function.
2140   void registerAAs(bool IsModulePass);
2141 };
2142 
2143 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) {
2144   if (!OMPInfoCache.ModuleSlice.count(&F))
2145     return nullptr;
2146 
2147   // Use a scope to keep the lifetime of the CachedKernel short.
2148   {
2149     Optional<Kernel> &CachedKernel = UniqueKernelMap[&F];
2150     if (CachedKernel)
2151       return *CachedKernel;
2152 
2153     // TODO: We should use an AA to create an (optimistic and callback
2154     //       call-aware) call graph. For now we stick to simple patterns that
2155     //       are less powerful, basically the worst fixpoint.
2156     if (isKernel(F)) {
2157       CachedKernel = Kernel(&F);
2158       return *CachedKernel;
2159     }
2160 
2161     CachedKernel = nullptr;
2162     if (!F.hasLocalLinkage()) {
2163 
2164       // See https://openmp.llvm.org/remarks/OptimizationRemarks.html
2165       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2166         return ORA << "Potentially unknown OpenMP target region caller.";
2167       };
2168       emitRemark<OptimizationRemarkAnalysis>(&F, "OMP100", Remark);
2169 
2170       return nullptr;
2171     }
2172   }
2173 
2174   auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel {
2175     if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
2176       // Allow use in equality comparisons.
2177       if (Cmp->isEquality())
2178         return getUniqueKernelFor(*Cmp);
2179       return nullptr;
2180     }
2181     if (auto *CB = dyn_cast<CallBase>(U.getUser())) {
2182       // Allow direct calls.
2183       if (CB->isCallee(&U))
2184         return getUniqueKernelFor(*CB);
2185 
2186       OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI =
2187           OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
2188       // Allow the use in __kmpc_parallel_51 calls.
2189       if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI))
2190         return getUniqueKernelFor(*CB);
2191       return nullptr;
2192     }
2193     // Disallow every other use.
2194     return nullptr;
2195   };
2196 
2197   // TODO: In the future we want to track more than just a unique kernel.
2198   SmallPtrSet<Kernel, 2> PotentialKernels;
2199   OMPInformationCache::foreachUse(F, [&](const Use &U) {
2200     PotentialKernels.insert(GetUniqueKernelForUse(U));
2201   });
2202 
2203   Kernel K = nullptr;
2204   if (PotentialKernels.size() == 1)
2205     K = *PotentialKernels.begin();
2206 
2207   // Cache the result.
2208   UniqueKernelMap[&F] = K;
2209 
2210   return K;
2211 }
2212 
2213 bool OpenMPOpt::rewriteDeviceCodeStateMachine() {
2214   OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI =
2215       OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
2216 
2217   bool Changed = false;
2218   if (!KernelParallelRFI)
2219     return Changed;
2220 
2221   // If we have disabled state machine changes, exit
2222   if (DisableOpenMPOptStateMachineRewrite)
2223     return Changed;
2224 
2225   for (Function *F : SCC) {
2226 
2227     // Check if the function is a use in a __kmpc_parallel_51 call at
2228     // all.
2229     bool UnknownUse = false;
2230     bool KernelParallelUse = false;
2231     unsigned NumDirectCalls = 0;
2232 
2233     SmallVector<Use *, 2> ToBeReplacedStateMachineUses;
2234     OMPInformationCache::foreachUse(*F, [&](Use &U) {
2235       if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2236         if (CB->isCallee(&U)) {
2237           ++NumDirectCalls;
2238           return;
2239         }
2240 
2241       if (isa<ICmpInst>(U.getUser())) {
2242         ToBeReplacedStateMachineUses.push_back(&U);
2243         return;
2244       }
2245 
2246       // Find wrapper functions that represent parallel kernels.
2247       CallInst *CI =
2248           OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI);
2249       const unsigned int WrapperFunctionArgNo = 6;
2250       if (!KernelParallelUse && CI &&
2251           CI->getArgOperandNo(&U) == WrapperFunctionArgNo) {
2252         KernelParallelUse = true;
2253         ToBeReplacedStateMachineUses.push_back(&U);
2254         return;
2255       }
2256       UnknownUse = true;
2257     });
2258 
2259     // Do not emit a remark if we haven't seen a __kmpc_parallel_51
2260     // use.
2261     if (!KernelParallelUse)
2262       continue;
2263 
2264     // If this ever hits, we should investigate.
2265     // TODO: Checking the number of uses is not a necessary restriction and
2266     // should be lifted.
2267     if (UnknownUse || NumDirectCalls != 1 ||
2268         ToBeReplacedStateMachineUses.size() > 2) {
2269       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2270         return ORA << "Parallel region is used in "
2271                    << (UnknownUse ? "unknown" : "unexpected")
2272                    << " ways. Will not attempt to rewrite the state machine.";
2273       };
2274       emitRemark<OptimizationRemarkAnalysis>(F, "OMP101", Remark);
2275       continue;
2276     }
2277 
2278     // Even if we have __kmpc_parallel_51 calls, we (for now) give
2279     // up if the function is not called from a unique kernel.
2280     Kernel K = getUniqueKernelFor(*F);
2281     if (!K) {
2282       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2283         return ORA << "Parallel region is not called from a unique kernel. "
2284                       "Will not attempt to rewrite the state machine.";
2285       };
2286       emitRemark<OptimizationRemarkAnalysis>(F, "OMP102", Remark);
2287       continue;
2288     }
2289 
2290     // We now know F is a parallel body function called only from the kernel K.
2291     // We also identified the state machine uses in which we replace the
2292     // function pointer by a new global symbol for identification purposes. This
2293     // ensures only direct calls to the function are left.
2294 
2295     Module &M = *F->getParent();
2296     Type *Int8Ty = Type::getInt8Ty(M.getContext());
2297 
2298     auto *ID = new GlobalVariable(
2299         M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage,
2300         UndefValue::get(Int8Ty), F->getName() + ".ID");
2301 
2302     for (Use *U : ToBeReplacedStateMachineUses)
2303       U->set(ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2304           ID, U->get()->getType()));
2305 
2306     ++NumOpenMPParallelRegionsReplacedInGPUStateMachine;
2307 
2308     Changed = true;
2309   }
2310 
2311   return Changed;
2312 }
2313 
2314 /// Abstract Attribute for tracking ICV values.
2315 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
2316   using Base = StateWrapper<BooleanState, AbstractAttribute>;
2317   AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
2318 
2319   void initialize(Attributor &A) override {
2320     Function *F = getAnchorScope();
2321     if (!F || !A.isFunctionIPOAmendable(*F))
2322       indicatePessimisticFixpoint();
2323   }
2324 
2325   /// Returns true if value is assumed to be tracked.
2326   bool isAssumedTracked() const { return getAssumed(); }
2327 
2328   /// Returns true if value is known to be tracked.
2329   bool isKnownTracked() const { return getAssumed(); }
2330 
2331   /// Create an abstract attribute biew for the position \p IRP.
2332   static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);
2333 
2334   /// Return the value with which \p I can be replaced for specific \p ICV.
2335   virtual Optional<Value *> getReplacementValue(InternalControlVar ICV,
2336                                                 const Instruction *I,
2337                                                 Attributor &A) const {
2338     return None;
2339   }
2340 
2341   /// Return an assumed unique ICV value if a single candidate is found. If
2342   /// there cannot be one, return a nullptr. If it is not clear yet, return the
2343   /// Optional::NoneType.
2344   virtual Optional<Value *>
2345   getUniqueReplacementValue(InternalControlVar ICV) const = 0;
2346 
2347   // Currently only nthreads is being tracked.
2348   // this array will only grow with time.
2349   InternalControlVar TrackableICVs[1] = {ICV_nthreads};
2350 
2351   /// See AbstractAttribute::getName()
2352   const std::string getName() const override { return "AAICVTracker"; }
2353 
2354   /// See AbstractAttribute::getIdAddr()
2355   const char *getIdAddr() const override { return &ID; }
2356 
2357   /// This function should return true if the type of the \p AA is AAICVTracker
2358   static bool classof(const AbstractAttribute *AA) {
2359     return (AA->getIdAddr() == &ID);
2360   }
2361 
2362   static const char ID;
2363 };
2364 
2365 struct AAICVTrackerFunction : public AAICVTracker {
2366   AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
2367       : AAICVTracker(IRP, A) {}
2368 
2369   // FIXME: come up with better string.
2370   const std::string getAsStr() const override { return "ICVTrackerFunction"; }
2371 
2372   // FIXME: come up with some stats.
2373   void trackStatistics() const override {}
2374 
2375   /// We don't manifest anything for this AA.
2376   ChangeStatus manifest(Attributor &A) override {
2377     return ChangeStatus::UNCHANGED;
2378   }
2379 
2380   // Map of ICV to their values at specific program point.
2381   EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
2382                   InternalControlVar::ICV___last>
2383       ICVReplacementValuesMap;
2384 
2385   ChangeStatus updateImpl(Attributor &A) override {
2386     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2387 
2388     Function *F = getAnchorScope();
2389 
2390     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2391 
2392     for (InternalControlVar ICV : TrackableICVs) {
2393       auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2394 
2395       auto &ValuesMap = ICVReplacementValuesMap[ICV];
2396       auto TrackValues = [&](Use &U, Function &) {
2397         CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
2398         if (!CI)
2399           return false;
2400 
2401         // FIXME: handle setters with more that 1 arguments.
2402         /// Track new value.
2403         if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
2404           HasChanged = ChangeStatus::CHANGED;
2405 
2406         return false;
2407       };
2408 
2409       auto CallCheck = [&](Instruction &I) {
2410         Optional<Value *> ReplVal = getValueForCall(A, I, ICV);
2411         if (ReplVal.hasValue() &&
2412             ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
2413           HasChanged = ChangeStatus::CHANGED;
2414 
2415         return true;
2416       };
2417 
2418       // Track all changes of an ICV.
2419       SetterRFI.foreachUse(TrackValues, F);
2420 
2421       bool UsedAssumedInformation = false;
2422       A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
2423                                 UsedAssumedInformation,
2424                                 /* CheckBBLivenessOnly */ true);
2425 
2426       /// TODO: Figure out a way to avoid adding entry in
2427       /// ICVReplacementValuesMap
2428       Instruction *Entry = &F->getEntryBlock().front();
2429       if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
2430         ValuesMap.insert(std::make_pair(Entry, nullptr));
2431     }
2432 
2433     return HasChanged;
2434   }
2435 
2436   /// Helper to check if \p I is a call and get the value for it if it is
2437   /// unique.
2438   Optional<Value *> getValueForCall(Attributor &A, const Instruction &I,
2439                                     InternalControlVar &ICV) const {
2440 
2441     const auto *CB = dyn_cast<CallBase>(&I);
2442     if (!CB || CB->hasFnAttr("no_openmp") ||
2443         CB->hasFnAttr("no_openmp_routines"))
2444       return None;
2445 
2446     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2447     auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];
2448     auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2449     Function *CalledFunction = CB->getCalledFunction();
2450 
2451     // Indirect call, assume ICV changes.
2452     if (CalledFunction == nullptr)
2453       return nullptr;
2454     if (CalledFunction == GetterRFI.Declaration)
2455       return None;
2456     if (CalledFunction == SetterRFI.Declaration) {
2457       if (ICVReplacementValuesMap[ICV].count(&I))
2458         return ICVReplacementValuesMap[ICV].lookup(&I);
2459 
2460       return nullptr;
2461     }
2462 
2463     // Since we don't know, assume it changes the ICV.
2464     if (CalledFunction->isDeclaration())
2465       return nullptr;
2466 
2467     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2468         *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED);
2469 
2470     if (ICVTrackingAA.isAssumedTracked()) {
2471       Optional<Value *> URV = ICVTrackingAA.getUniqueReplacementValue(ICV);
2472       if (!URV || (*URV && AA::isValidAtPosition(**URV, I, OMPInfoCache)))
2473         return URV;
2474     }
2475 
2476     // If we don't know, assume it changes.
2477     return nullptr;
2478   }
2479 
2480   // We don't check unique value for a function, so return None.
2481   Optional<Value *>
2482   getUniqueReplacementValue(InternalControlVar ICV) const override {
2483     return None;
2484   }
2485 
2486   /// Return the value with which \p I can be replaced for specific \p ICV.
2487   Optional<Value *> getReplacementValue(InternalControlVar ICV,
2488                                         const Instruction *I,
2489                                         Attributor &A) const override {
2490     const auto &ValuesMap = ICVReplacementValuesMap[ICV];
2491     if (ValuesMap.count(I))
2492       return ValuesMap.lookup(I);
2493 
2494     SmallVector<const Instruction *, 16> Worklist;
2495     SmallPtrSet<const Instruction *, 16> Visited;
2496     Worklist.push_back(I);
2497 
2498     Optional<Value *> ReplVal;
2499 
2500     while (!Worklist.empty()) {
2501       const Instruction *CurrInst = Worklist.pop_back_val();
2502       if (!Visited.insert(CurrInst).second)
2503         continue;
2504 
2505       const BasicBlock *CurrBB = CurrInst->getParent();
2506 
2507       // Go up and look for all potential setters/calls that might change the
2508       // ICV.
2509       while ((CurrInst = CurrInst->getPrevNode())) {
2510         if (ValuesMap.count(CurrInst)) {
2511           Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
2512           // Unknown value, track new.
2513           if (!ReplVal.hasValue()) {
2514             ReplVal = NewReplVal;
2515             break;
2516           }
2517 
2518           // If we found a new value, we can't know the icv value anymore.
2519           if (NewReplVal.hasValue())
2520             if (ReplVal != NewReplVal)
2521               return nullptr;
2522 
2523           break;
2524         }
2525 
2526         Optional<Value *> NewReplVal = getValueForCall(A, *CurrInst, ICV);
2527         if (!NewReplVal.hasValue())
2528           continue;
2529 
2530         // Unknown value, track new.
2531         if (!ReplVal.hasValue()) {
2532           ReplVal = NewReplVal;
2533           break;
2534         }
2535 
2536         // if (NewReplVal.hasValue())
2537         // We found a new value, we can't know the icv value anymore.
2538         if (ReplVal != NewReplVal)
2539           return nullptr;
2540       }
2541 
2542       // If we are in the same BB and we have a value, we are done.
2543       if (CurrBB == I->getParent() && ReplVal.hasValue())
2544         return ReplVal;
2545 
2546       // Go through all predecessors and add terminators for analysis.
2547       for (const BasicBlock *Pred : predecessors(CurrBB))
2548         if (const Instruction *Terminator = Pred->getTerminator())
2549           Worklist.push_back(Terminator);
2550     }
2551 
2552     return ReplVal;
2553   }
2554 };
2555 
2556 struct AAICVTrackerFunctionReturned : AAICVTracker {
2557   AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A)
2558       : AAICVTracker(IRP, A) {}
2559 
2560   // FIXME: come up with better string.
2561   const std::string getAsStr() const override {
2562     return "ICVTrackerFunctionReturned";
2563   }
2564 
2565   // FIXME: come up with some stats.
2566   void trackStatistics() const override {}
2567 
2568   /// We don't manifest anything for this AA.
2569   ChangeStatus manifest(Attributor &A) override {
2570     return ChangeStatus::UNCHANGED;
2571   }
2572 
2573   // Map of ICV to their values at specific program point.
2574   EnumeratedArray<Optional<Value *>, InternalControlVar,
2575                   InternalControlVar::ICV___last>
2576       ICVReplacementValuesMap;
2577 
2578   /// Return the value with which \p I can be replaced for specific \p ICV.
2579   Optional<Value *>
2580   getUniqueReplacementValue(InternalControlVar ICV) const override {
2581     return ICVReplacementValuesMap[ICV];
2582   }
2583 
2584   ChangeStatus updateImpl(Attributor &A) override {
2585     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2586     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2587         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
2588 
2589     if (!ICVTrackingAA.isAssumedTracked())
2590       return indicatePessimisticFixpoint();
2591 
2592     for (InternalControlVar ICV : TrackableICVs) {
2593       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
2594       Optional<Value *> UniqueICVValue;
2595 
2596       auto CheckReturnInst = [&](Instruction &I) {
2597         Optional<Value *> NewReplVal =
2598             ICVTrackingAA.getReplacementValue(ICV, &I, A);
2599 
2600         // If we found a second ICV value there is no unique returned value.
2601         if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal)
2602           return false;
2603 
2604         UniqueICVValue = NewReplVal;
2605 
2606         return true;
2607       };
2608 
2609       bool UsedAssumedInformation = false;
2610       if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret},
2611                                      UsedAssumedInformation,
2612                                      /* CheckBBLivenessOnly */ true))
2613         UniqueICVValue = nullptr;
2614 
2615       if (UniqueICVValue == ReplVal)
2616         continue;
2617 
2618       ReplVal = UniqueICVValue;
2619       Changed = ChangeStatus::CHANGED;
2620     }
2621 
2622     return Changed;
2623   }
2624 };
2625 
2626 struct AAICVTrackerCallSite : AAICVTracker {
2627   AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A)
2628       : AAICVTracker(IRP, A) {}
2629 
2630   void initialize(Attributor &A) override {
2631     Function *F = getAnchorScope();
2632     if (!F || !A.isFunctionIPOAmendable(*F))
2633       indicatePessimisticFixpoint();
2634 
2635     // We only initialize this AA for getters, so we need to know which ICV it
2636     // gets.
2637     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2638     for (InternalControlVar ICV : TrackableICVs) {
2639       auto ICVInfo = OMPInfoCache.ICVs[ICV];
2640       auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter];
2641       if (Getter.Declaration == getAssociatedFunction()) {
2642         AssociatedICV = ICVInfo.Kind;
2643         return;
2644       }
2645     }
2646 
2647     /// Unknown ICV.
2648     indicatePessimisticFixpoint();
2649   }
2650 
2651   ChangeStatus manifest(Attributor &A) override {
2652     if (!ReplVal.hasValue() || !ReplVal.getValue())
2653       return ChangeStatus::UNCHANGED;
2654 
2655     A.changeValueAfterManifest(*getCtxI(), **ReplVal);
2656     A.deleteAfterManifest(*getCtxI());
2657 
2658     return ChangeStatus::CHANGED;
2659   }
2660 
2661   // FIXME: come up with better string.
2662   const std::string getAsStr() const override { return "ICVTrackerCallSite"; }
2663 
2664   // FIXME: come up with some stats.
2665   void trackStatistics() const override {}
2666 
2667   InternalControlVar AssociatedICV;
2668   Optional<Value *> ReplVal;
2669 
2670   ChangeStatus updateImpl(Attributor &A) override {
2671     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2672         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
2673 
2674     // We don't have any information, so we assume it changes the ICV.
2675     if (!ICVTrackingAA.isAssumedTracked())
2676       return indicatePessimisticFixpoint();
2677 
2678     Optional<Value *> NewReplVal =
2679         ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A);
2680 
2681     if (ReplVal == NewReplVal)
2682       return ChangeStatus::UNCHANGED;
2683 
2684     ReplVal = NewReplVal;
2685     return ChangeStatus::CHANGED;
2686   }
2687 
2688   // Return the value with which associated value can be replaced for specific
2689   // \p ICV.
2690   Optional<Value *>
2691   getUniqueReplacementValue(InternalControlVar ICV) const override {
2692     return ReplVal;
2693   }
2694 };
2695 
2696 struct AAICVTrackerCallSiteReturned : AAICVTracker {
2697   AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A)
2698       : AAICVTracker(IRP, A) {}
2699 
2700   // FIXME: come up with better string.
2701   const std::string getAsStr() const override {
2702     return "ICVTrackerCallSiteReturned";
2703   }
2704 
2705   // FIXME: come up with some stats.
2706   void trackStatistics() const override {}
2707 
2708   /// We don't manifest anything for this AA.
2709   ChangeStatus manifest(Attributor &A) override {
2710     return ChangeStatus::UNCHANGED;
2711   }
2712 
2713   // Map of ICV to their values at specific program point.
2714   EnumeratedArray<Optional<Value *>, InternalControlVar,
2715                   InternalControlVar::ICV___last>
2716       ICVReplacementValuesMap;
2717 
2718   /// Return the value with which associated value can be replaced for specific
2719   /// \p ICV.
2720   Optional<Value *>
2721   getUniqueReplacementValue(InternalControlVar ICV) const override {
2722     return ICVReplacementValuesMap[ICV];
2723   }
2724 
2725   ChangeStatus updateImpl(Attributor &A) override {
2726     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2727     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2728         *this, IRPosition::returned(*getAssociatedFunction()),
2729         DepClassTy::REQUIRED);
2730 
2731     // We don't have any information, so we assume it changes the ICV.
2732     if (!ICVTrackingAA.isAssumedTracked())
2733       return indicatePessimisticFixpoint();
2734 
2735     for (InternalControlVar ICV : TrackableICVs) {
2736       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
2737       Optional<Value *> NewReplVal =
2738           ICVTrackingAA.getUniqueReplacementValue(ICV);
2739 
2740       if (ReplVal == NewReplVal)
2741         continue;
2742 
2743       ReplVal = NewReplVal;
2744       Changed = ChangeStatus::CHANGED;
2745     }
2746     return Changed;
2747   }
2748 };
2749 
2750 struct AAExecutionDomainFunction : public AAExecutionDomain {
2751   AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A)
2752       : AAExecutionDomain(IRP, A) {}
2753 
2754   const std::string getAsStr() const override {
2755     return "[AAExecutionDomain] " + std::to_string(SingleThreadedBBs.size()) +
2756            "/" + std::to_string(NumBBs) + " BBs thread 0 only.";
2757   }
2758 
2759   /// See AbstractAttribute::trackStatistics().
2760   void trackStatistics() const override {}
2761 
2762   void initialize(Attributor &A) override {
2763     Function *F = getAnchorScope();
2764     for (const auto &BB : *F)
2765       SingleThreadedBBs.insert(&BB);
2766     NumBBs = SingleThreadedBBs.size();
2767   }
2768 
2769   ChangeStatus manifest(Attributor &A) override {
2770     LLVM_DEBUG({
2771       for (const BasicBlock *BB : SingleThreadedBBs)
2772         dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " "
2773                << BB->getName() << " is executed by a single thread.\n";
2774     });
2775     return ChangeStatus::UNCHANGED;
2776   }
2777 
2778   ChangeStatus updateImpl(Attributor &A) override;
2779 
2780   /// Check if an instruction is executed by a single thread.
2781   bool isExecutedByInitialThreadOnly(const Instruction &I) const override {
2782     return isExecutedByInitialThreadOnly(*I.getParent());
2783   }
2784 
2785   bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override {
2786     return isValidState() && SingleThreadedBBs.contains(&BB);
2787   }
2788 
2789   /// Set of basic blocks that are executed by a single thread.
2790   SmallSetVector<const BasicBlock *, 16> SingleThreadedBBs;
2791 
2792   /// Total number of basic blocks in this function.
2793   long unsigned NumBBs;
2794 };
2795 
2796 ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
2797   Function *F = getAnchorScope();
2798   ReversePostOrderTraversal<Function *> RPOT(F);
2799   auto NumSingleThreadedBBs = SingleThreadedBBs.size();
2800 
2801   bool AllCallSitesKnown;
2802   auto PredForCallSite = [&](AbstractCallSite ACS) {
2803     const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>(
2804         *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2805         DepClassTy::REQUIRED);
2806     return ACS.isDirectCall() &&
2807            ExecutionDomainAA.isExecutedByInitialThreadOnly(
2808                *ACS.getInstruction());
2809   };
2810 
2811   if (!A.checkForAllCallSites(PredForCallSite, *this,
2812                               /* RequiresAllCallSites */ true,
2813                               AllCallSitesKnown))
2814     SingleThreadedBBs.remove(&F->getEntryBlock());
2815 
2816   auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2817   auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
2818 
2819   // Check if the edge into the successor block contains a condition that only
2820   // lets the main thread execute it.
2821   auto IsInitialThreadOnly = [&](BranchInst *Edge, BasicBlock *SuccessorBB) {
2822     if (!Edge || !Edge->isConditional())
2823       return false;
2824     if (Edge->getSuccessor(0) != SuccessorBB)
2825       return false;
2826 
2827     auto *Cmp = dyn_cast<CmpInst>(Edge->getCondition());
2828     if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality())
2829       return false;
2830 
2831     ConstantInt *C = dyn_cast<ConstantInt>(Cmp->getOperand(1));
2832     if (!C)
2833       return false;
2834 
2835     // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!)
2836     if (C->isAllOnesValue()) {
2837       auto *CB = dyn_cast<CallBase>(Cmp->getOperand(0));
2838       CB = CB ? OpenMPOpt::getCallIfRegularCall(*CB, &RFI) : nullptr;
2839       if (!CB)
2840         return false;
2841       const int InitModeArgNo = 1;
2842       auto *ModeCI = dyn_cast<ConstantInt>(CB->getOperand(InitModeArgNo));
2843       return ModeCI && (ModeCI->getSExtValue() & OMP_TGT_EXEC_MODE_GENERIC);
2844     }
2845 
2846     if (C->isZero()) {
2847       // Match: 0 == llvm.nvvm.read.ptx.sreg.tid.x()
2848       if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0)))
2849         if (II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_tid_x)
2850           return true;
2851 
2852       // Match: 0 == llvm.amdgcn.workitem.id.x()
2853       if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0)))
2854         if (II->getIntrinsicID() == Intrinsic::amdgcn_workitem_id_x)
2855           return true;
2856     }
2857 
2858     return false;
2859   };
2860 
2861   // Merge all the predecessor states into the current basic block. A basic
2862   // block is executed by a single thread if all of its predecessors are.
2863   auto MergePredecessorStates = [&](BasicBlock *BB) {
2864     if (pred_empty(BB))
2865       return SingleThreadedBBs.contains(BB);
2866 
2867     bool IsInitialThread = true;
2868     for (BasicBlock *PredBB : predecessors(BB)) {
2869       if (!IsInitialThreadOnly(dyn_cast<BranchInst>(PredBB->getTerminator()),
2870                                BB))
2871         IsInitialThread &= SingleThreadedBBs.contains(PredBB);
2872     }
2873 
2874     return IsInitialThread;
2875   };
2876 
2877   for (auto *BB : RPOT) {
2878     if (!MergePredecessorStates(BB))
2879       SingleThreadedBBs.remove(BB);
2880   }
2881 
2882   return (NumSingleThreadedBBs == SingleThreadedBBs.size())
2883              ? ChangeStatus::UNCHANGED
2884              : ChangeStatus::CHANGED;
2885 }
2886 
2887 /// Try to replace memory allocation calls called by a single thread with a
2888 /// static buffer of shared memory.
2889 struct AAHeapToShared : public StateWrapper<BooleanState, AbstractAttribute> {
2890   using Base = StateWrapper<BooleanState, AbstractAttribute>;
2891   AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
2892 
2893   /// Create an abstract attribute view for the position \p IRP.
2894   static AAHeapToShared &createForPosition(const IRPosition &IRP,
2895                                            Attributor &A);
2896 
2897   /// Returns true if HeapToShared conversion is assumed to be possible.
2898   virtual bool isAssumedHeapToShared(CallBase &CB) const = 0;
2899 
2900   /// Returns true if HeapToShared conversion is assumed and the CB is a
2901   /// callsite to a free operation to be removed.
2902   virtual bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const = 0;
2903 
2904   /// See AbstractAttribute::getName().
2905   const std::string getName() const override { return "AAHeapToShared"; }
2906 
2907   /// See AbstractAttribute::getIdAddr().
2908   const char *getIdAddr() const override { return &ID; }
2909 
2910   /// This function should return true if the type of the \p AA is
2911   /// AAHeapToShared.
2912   static bool classof(const AbstractAttribute *AA) {
2913     return (AA->getIdAddr() == &ID);
2914   }
2915 
2916   /// Unique ID (due to the unique address)
2917   static const char ID;
2918 };
2919 
2920 struct AAHeapToSharedFunction : public AAHeapToShared {
2921   AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A)
2922       : AAHeapToShared(IRP, A) {}
2923 
2924   const std::string getAsStr() const override {
2925     return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) +
2926            " malloc calls eligible.";
2927   }
2928 
2929   /// See AbstractAttribute::trackStatistics().
2930   void trackStatistics() const override {}
2931 
2932   /// This functions finds free calls that will be removed by the
2933   /// HeapToShared transformation.
2934   void findPotentialRemovedFreeCalls(Attributor &A) {
2935     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2936     auto &FreeRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];
2937 
2938     PotentialRemovedFreeCalls.clear();
2939     // Update free call users of found malloc calls.
2940     for (CallBase *CB : MallocCalls) {
2941       SmallVector<CallBase *, 4> FreeCalls;
2942       for (auto *U : CB->users()) {
2943         CallBase *C = dyn_cast<CallBase>(U);
2944         if (C && C->getCalledFunction() == FreeRFI.Declaration)
2945           FreeCalls.push_back(C);
2946       }
2947 
2948       if (FreeCalls.size() != 1)
2949         continue;
2950 
2951       PotentialRemovedFreeCalls.insert(FreeCalls.front());
2952     }
2953   }
2954 
2955   void initialize(Attributor &A) override {
2956     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2957     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
2958 
2959     for (User *U : RFI.Declaration->users())
2960       if (CallBase *CB = dyn_cast<CallBase>(U))
2961         MallocCalls.insert(CB);
2962 
2963     findPotentialRemovedFreeCalls(A);
2964   }
2965 
2966   bool isAssumedHeapToShared(CallBase &CB) const override {
2967     return isValidState() && MallocCalls.count(&CB);
2968   }
2969 
2970   bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const override {
2971     return isValidState() && PotentialRemovedFreeCalls.count(&CB);
2972   }
2973 
2974   ChangeStatus manifest(Attributor &A) override {
2975     if (MallocCalls.empty())
2976       return ChangeStatus::UNCHANGED;
2977 
2978     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2979     auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];
2980 
2981     Function *F = getAnchorScope();
2982     auto *HS = A.lookupAAFor<AAHeapToStack>(IRPosition::function(*F), this,
2983                                             DepClassTy::OPTIONAL);
2984 
2985     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2986     for (CallBase *CB : MallocCalls) {
2987       // Skip replacing this if HeapToStack has already claimed it.
2988       if (HS && HS->isAssumedHeapToStack(*CB))
2989         continue;
2990 
2991       // Find the unique free call to remove it.
2992       SmallVector<CallBase *, 4> FreeCalls;
2993       for (auto *U : CB->users()) {
2994         CallBase *C = dyn_cast<CallBase>(U);
2995         if (C && C->getCalledFunction() == FreeCall.Declaration)
2996           FreeCalls.push_back(C);
2997       }
2998       if (FreeCalls.size() != 1)
2999         continue;
3000 
3001       auto *AllocSize = cast<ConstantInt>(CB->getArgOperand(0));
3002 
3003       LLVM_DEBUG(dbgs() << TAG << "Replace globalization call " << *CB
3004                         << " with " << AllocSize->getZExtValue()
3005                         << " bytes of shared memory\n");
3006 
3007       // Create a new shared memory buffer of the same size as the allocation
3008       // and replace all the uses of the original allocation with it.
3009       Module *M = CB->getModule();
3010       Type *Int8Ty = Type::getInt8Ty(M->getContext());
3011       Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue());
3012       auto *SharedMem = new GlobalVariable(
3013           *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage,
3014           UndefValue::get(Int8ArrTy), CB->getName() + "_shared", nullptr,
3015           GlobalValue::NotThreadLocal,
3016           static_cast<unsigned>(AddressSpace::Shared));
3017       auto *NewBuffer =
3018           ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo());
3019 
3020       auto Remark = [&](OptimizationRemark OR) {
3021         return OR << "Replaced globalized variable with "
3022                   << ore::NV("SharedMemory", AllocSize->getZExtValue())
3023                   << ((AllocSize->getZExtValue() != 1) ? " bytes " : " byte ")
3024                   << "of shared memory.";
3025       };
3026       A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark);
3027 
3028       MaybeAlign Alignment = CB->getRetAlign();
3029       assert(Alignment &&
3030              "HeapToShared on allocation without alignment attribute");
3031       SharedMem->setAlignment(MaybeAlign(Alignment));
3032 
3033       A.changeValueAfterManifest(*CB, *NewBuffer);
3034       A.deleteAfterManifest(*CB);
3035       A.deleteAfterManifest(*FreeCalls.front());
3036 
3037       NumBytesMovedToSharedMemory += AllocSize->getZExtValue();
3038       Changed = ChangeStatus::CHANGED;
3039     }
3040 
3041     return Changed;
3042   }
3043 
3044   ChangeStatus updateImpl(Attributor &A) override {
3045     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3046     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
3047     Function *F = getAnchorScope();
3048 
3049     auto NumMallocCalls = MallocCalls.size();
3050 
3051     // Only consider malloc calls executed by a single thread with a constant.
3052     for (User *U : RFI.Declaration->users()) {
3053       const auto &ED = A.getAAFor<AAExecutionDomain>(
3054           *this, IRPosition::function(*F), DepClassTy::REQUIRED);
3055       if (CallBase *CB = dyn_cast<CallBase>(U))
3056         if (!isa<ConstantInt>(CB->getArgOperand(0)) ||
3057             !ED.isExecutedByInitialThreadOnly(*CB))
3058           MallocCalls.remove(CB);
3059     }
3060 
3061     findPotentialRemovedFreeCalls(A);
3062 
3063     if (NumMallocCalls != MallocCalls.size())
3064       return ChangeStatus::CHANGED;
3065 
3066     return ChangeStatus::UNCHANGED;
3067   }
3068 
3069   /// Collection of all malloc calls in a function.
3070   SmallSetVector<CallBase *, 4> MallocCalls;
3071   /// Collection of potentially removed free calls in a function.
3072   SmallPtrSet<CallBase *, 4> PotentialRemovedFreeCalls;
3073 };
3074 
3075 struct AAKernelInfo : public StateWrapper<KernelInfoState, AbstractAttribute> {
3076   using Base = StateWrapper<KernelInfoState, AbstractAttribute>;
3077   AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
3078 
3079   /// Statistics are tracked as part of manifest for now.
3080   void trackStatistics() const override {}
3081 
3082   /// See AbstractAttribute::getAsStr()
3083   const std::string getAsStr() const override {
3084     if (!isValidState())
3085       return "<invalid>";
3086     return std::string(SPMDCompatibilityTracker.isAssumed() ? "SPMD"
3087                                                             : "generic") +
3088            std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]"
3089                                                                : "") +
3090            std::string(" #PRs: ") +
3091            (ReachedKnownParallelRegions.isValidState()
3092                 ? std::to_string(ReachedKnownParallelRegions.size())
3093                 : "<invalid>") +
3094            ", #Unknown PRs: " +
3095            (ReachedUnknownParallelRegions.isValidState()
3096                 ? std::to_string(ReachedUnknownParallelRegions.size())
3097                 : "<invalid>") +
3098            ", #Reaching Kernels: " +
3099            (ReachingKernelEntries.isValidState()
3100                 ? std::to_string(ReachingKernelEntries.size())
3101                 : "<invalid>");
3102   }
3103 
3104   /// Create an abstract attribute biew for the position \p IRP.
3105   static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A);
3106 
3107   /// See AbstractAttribute::getName()
3108   const std::string getName() const override { return "AAKernelInfo"; }
3109 
3110   /// See AbstractAttribute::getIdAddr()
3111   const char *getIdAddr() const override { return &ID; }
3112 
3113   /// This function should return true if the type of the \p AA is AAKernelInfo
3114   static bool classof(const AbstractAttribute *AA) {
3115     return (AA->getIdAddr() == &ID);
3116   }
3117 
3118   static const char ID;
3119 };
3120 
3121 /// The function kernel info abstract attribute, basically, what can we say
3122 /// about a function with regards to the KernelInfoState.
3123 struct AAKernelInfoFunction : AAKernelInfo {
3124   AAKernelInfoFunction(const IRPosition &IRP, Attributor &A)
3125       : AAKernelInfo(IRP, A) {}
3126 
3127   SmallPtrSet<Instruction *, 4> GuardedInstructions;
3128 
3129   SmallPtrSetImpl<Instruction *> &getGuardedInstructions() {
3130     return GuardedInstructions;
3131   }
3132 
3133   /// See AbstractAttribute::initialize(...).
3134   void initialize(Attributor &A) override {
3135     // This is a high-level transform that might change the constant arguments
3136     // of the init and dinit calls. We need to tell the Attributor about this
3137     // to avoid other parts using the current constant value for simpliication.
3138     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3139 
3140     Function *Fn = getAnchorScope();
3141     if (!OMPInfoCache.Kernels.count(Fn))
3142       return;
3143 
3144     // Add itself to the reaching kernel and set IsKernelEntry.
3145     ReachingKernelEntries.insert(Fn);
3146     IsKernelEntry = true;
3147 
3148     OMPInformationCache::RuntimeFunctionInfo &InitRFI =
3149         OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
3150     OMPInformationCache::RuntimeFunctionInfo &DeinitRFI =
3151         OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit];
3152 
3153     // For kernels we perform more initialization work, first we find the init
3154     // and deinit calls.
3155     auto StoreCallBase = [](Use &U,
3156                             OMPInformationCache::RuntimeFunctionInfo &RFI,
3157                             CallBase *&Storage) {
3158       CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI);
3159       assert(CB &&
3160              "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!");
3161       assert(!Storage &&
3162              "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!");
3163       Storage = CB;
3164       return false;
3165     };
3166     InitRFI.foreachUse(
3167         [&](Use &U, Function &) {
3168           StoreCallBase(U, InitRFI, KernelInitCB);
3169           return false;
3170         },
3171         Fn);
3172     DeinitRFI.foreachUse(
3173         [&](Use &U, Function &) {
3174           StoreCallBase(U, DeinitRFI, KernelDeinitCB);
3175           return false;
3176         },
3177         Fn);
3178 
3179     // Ignore kernels without initializers such as global constructors.
3180     if (!KernelInitCB || !KernelDeinitCB) {
3181       indicateOptimisticFixpoint();
3182       return;
3183     }
3184 
3185     // For kernels we might need to initialize/finalize the IsSPMD state and
3186     // we need to register a simplification callback so that the Attributor
3187     // knows the constant arguments to __kmpc_target_init and
3188     // __kmpc_target_deinit might actually change.
3189 
3190     Attributor::SimplifictionCallbackTy StateMachineSimplifyCB =
3191         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3192             bool &UsedAssumedInformation) -> Optional<Value *> {
3193       // IRP represents the "use generic state machine" argument of an
3194       // __kmpc_target_init call. We will answer this one with the internal
3195       // state. As long as we are not in an invalid state, we will create a
3196       // custom state machine so the value should be a `i1 false`. If we are
3197       // in an invalid state, we won't change the value that is in the IR.
3198       if (!ReachedKnownParallelRegions.isValidState())
3199         return nullptr;
3200       // If we have disabled state machine rewrites, don't make a custom one.
3201       if (DisableOpenMPOptStateMachineRewrite)
3202         return nullptr;
3203       if (AA)
3204         A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3205       UsedAssumedInformation = !isAtFixpoint();
3206       auto *FalseVal =
3207           ConstantInt::getBool(IRP.getAnchorValue().getContext(), false);
3208       return FalseVal;
3209     };
3210 
3211     Attributor::SimplifictionCallbackTy ModeSimplifyCB =
3212         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3213             bool &UsedAssumedInformation) -> Optional<Value *> {
3214       // IRP represents the "SPMDCompatibilityTracker" argument of an
3215       // __kmpc_target_init or
3216       // __kmpc_target_deinit call. We will answer this one with the internal
3217       // state.
3218       if (!SPMDCompatibilityTracker.isValidState())
3219         return nullptr;
3220       if (!SPMDCompatibilityTracker.isAtFixpoint()) {
3221         if (AA)
3222           A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3223         UsedAssumedInformation = true;
3224       } else {
3225         UsedAssumedInformation = false;
3226       }
3227       auto *Val = ConstantInt::getSigned(
3228           IntegerType::getInt8Ty(IRP.getAnchorValue().getContext()),
3229           SPMDCompatibilityTracker.isAssumed() ? OMP_TGT_EXEC_MODE_SPMD
3230                                                : OMP_TGT_EXEC_MODE_GENERIC);
3231       return Val;
3232     };
3233 
3234     Attributor::SimplifictionCallbackTy IsGenericModeSimplifyCB =
3235         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3236             bool &UsedAssumedInformation) -> Optional<Value *> {
3237       // IRP represents the "RequiresFullRuntime" argument of an
3238       // __kmpc_target_init or __kmpc_target_deinit call. We will answer this
3239       // one with the internal state of the SPMDCompatibilityTracker, so if
3240       // generic then true, if SPMD then false.
3241       if (!SPMDCompatibilityTracker.isValidState())
3242         return nullptr;
3243       if (!SPMDCompatibilityTracker.isAtFixpoint()) {
3244         if (AA)
3245           A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3246         UsedAssumedInformation = true;
3247       } else {
3248         UsedAssumedInformation = false;
3249       }
3250       auto *Val = ConstantInt::getBool(IRP.getAnchorValue().getContext(),
3251                                        !SPMDCompatibilityTracker.isAssumed());
3252       return Val;
3253     };
3254 
3255     constexpr const int InitModeArgNo = 1;
3256     constexpr const int DeinitModeArgNo = 1;
3257     constexpr const int InitUseStateMachineArgNo = 2;
3258     constexpr const int InitRequiresFullRuntimeArgNo = 3;
3259     constexpr const int DeinitRequiresFullRuntimeArgNo = 2;
3260     A.registerSimplificationCallback(
3261         IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo),
3262         StateMachineSimplifyCB);
3263     A.registerSimplificationCallback(
3264         IRPosition::callsite_argument(*KernelInitCB, InitModeArgNo),
3265         ModeSimplifyCB);
3266     A.registerSimplificationCallback(
3267         IRPosition::callsite_argument(*KernelDeinitCB, DeinitModeArgNo),
3268         ModeSimplifyCB);
3269     A.registerSimplificationCallback(
3270         IRPosition::callsite_argument(*KernelInitCB,
3271                                       InitRequiresFullRuntimeArgNo),
3272         IsGenericModeSimplifyCB);
3273     A.registerSimplificationCallback(
3274         IRPosition::callsite_argument(*KernelDeinitCB,
3275                                       DeinitRequiresFullRuntimeArgNo),
3276         IsGenericModeSimplifyCB);
3277 
3278     // Check if we know we are in SPMD-mode already.
3279     ConstantInt *ModeArg =
3280         dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo));
3281     if (ModeArg && (ModeArg->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD))
3282       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
3283     // This is a generic region but SPMDization is disabled so stop tracking.
3284     else if (DisableOpenMPOptSPMDization)
3285       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
3286   }
3287 
3288   /// Sanitize the string \p S such that it is a suitable global symbol name.
3289   static std::string sanitizeForGlobalName(std::string S) {
3290     std::replace_if(
3291         S.begin(), S.end(),
3292         [](const char C) {
3293           return !((C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') ||
3294                    (C >= '0' && C <= '9') || C == '_');
3295         },
3296         '.');
3297     return S;
3298   }
3299 
3300   /// Modify the IR based on the KernelInfoState as the fixpoint iteration is
3301   /// finished now.
3302   ChangeStatus manifest(Attributor &A) override {
3303     // If we are not looking at a kernel with __kmpc_target_init and
3304     // __kmpc_target_deinit call we cannot actually manifest the information.
3305     if (!KernelInitCB || !KernelDeinitCB)
3306       return ChangeStatus::UNCHANGED;
3307 
3308     // If we can we change the execution mode to SPMD-mode otherwise we build a
3309     // custom state machine.
3310     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3311     if (!changeToSPMDMode(A, Changed))
3312       return buildCustomStateMachine(A);
3313 
3314     return Changed;
3315   }
3316 
3317   bool changeToSPMDMode(Attributor &A, ChangeStatus &Changed) {
3318     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3319 
3320     if (!SPMDCompatibilityTracker.isAssumed()) {
3321       for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) {
3322         if (!NonCompatibleI)
3323           continue;
3324 
3325         // Skip diagnostics on calls to known OpenMP runtime functions for now.
3326         if (auto *CB = dyn_cast<CallBase>(NonCompatibleI))
3327           if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction()))
3328             continue;
3329 
3330         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
3331           ORA << "Value has potential side effects preventing SPMD-mode "
3332                  "execution";
3333           if (isa<CallBase>(NonCompatibleI)) {
3334             ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to "
3335                    "the called function to override";
3336           }
3337           return ORA << ".";
3338         };
3339         A.emitRemark<OptimizationRemarkAnalysis>(NonCompatibleI, "OMP121",
3340                                                  Remark);
3341 
3342         LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: "
3343                           << *NonCompatibleI << "\n");
3344       }
3345 
3346       return false;
3347     }
3348 
3349     // Check if the kernel is already in SPMD mode, if so, return success.
3350     Function *Kernel = getAnchorScope();
3351     GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable(
3352         (Kernel->getName() + "_exec_mode").str());
3353     assert(ExecMode && "Kernel without exec mode?");
3354     assert(ExecMode->getInitializer() && "ExecMode doesn't have initializer!");
3355 
3356     // Set the global exec mode flag to indicate SPMD-Generic mode.
3357     assert(isa<ConstantInt>(ExecMode->getInitializer()) &&
3358            "ExecMode is not an integer!");
3359     const int8_t ExecModeVal =
3360         cast<ConstantInt>(ExecMode->getInitializer())->getSExtValue();
3361     if (ExecModeVal != OMP_TGT_EXEC_MODE_GENERIC)
3362       return true;
3363 
3364     // We will now unconditionally modify the IR, indicate a change.
3365     Changed = ChangeStatus::CHANGED;
3366 
3367     auto CreateGuardedRegion = [&](Instruction *RegionStartI,
3368                                    Instruction *RegionEndI) {
3369       LoopInfo *LI = nullptr;
3370       DominatorTree *DT = nullptr;
3371       MemorySSAUpdater *MSU = nullptr;
3372       using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
3373 
3374       BasicBlock *ParentBB = RegionStartI->getParent();
3375       Function *Fn = ParentBB->getParent();
3376       Module &M = *Fn->getParent();
3377 
3378       // Create all the blocks and logic.
3379       // ParentBB:
3380       //    goto RegionCheckTidBB
3381       // RegionCheckTidBB:
3382       //    Tid = __kmpc_hardware_thread_id()
3383       //    if (Tid != 0)
3384       //        goto RegionBarrierBB
3385       // RegionStartBB:
3386       //    <execute instructions guarded>
3387       //    goto RegionEndBB
3388       // RegionEndBB:
3389       //    <store escaping values to shared mem>
3390       //    goto RegionBarrierBB
3391       //  RegionBarrierBB:
3392       //    __kmpc_simple_barrier_spmd()
3393       //    // second barrier is omitted if lacking escaping values.
3394       //    <load escaping values from shared mem>
3395       //    __kmpc_simple_barrier_spmd()
3396       //    goto RegionExitBB
3397       // RegionExitBB:
3398       //    <execute rest of instructions>
3399 
3400       BasicBlock *RegionEndBB = SplitBlock(ParentBB, RegionEndI->getNextNode(),
3401                                            DT, LI, MSU, "region.guarded.end");
3402       BasicBlock *RegionBarrierBB =
3403           SplitBlock(RegionEndBB, &*RegionEndBB->getFirstInsertionPt(), DT, LI,
3404                      MSU, "region.barrier");
3405       BasicBlock *RegionExitBB =
3406           SplitBlock(RegionBarrierBB, &*RegionBarrierBB->getFirstInsertionPt(),
3407                      DT, LI, MSU, "region.exit");
3408       BasicBlock *RegionStartBB =
3409           SplitBlock(ParentBB, RegionStartI, DT, LI, MSU, "region.guarded");
3410 
3411       assert(ParentBB->getUniqueSuccessor() == RegionStartBB &&
3412              "Expected a different CFG");
3413 
3414       BasicBlock *RegionCheckTidBB = SplitBlock(
3415           ParentBB, ParentBB->getTerminator(), DT, LI, MSU, "region.check.tid");
3416 
3417       // Register basic blocks with the Attributor.
3418       A.registerManifestAddedBasicBlock(*RegionEndBB);
3419       A.registerManifestAddedBasicBlock(*RegionBarrierBB);
3420       A.registerManifestAddedBasicBlock(*RegionExitBB);
3421       A.registerManifestAddedBasicBlock(*RegionStartBB);
3422       A.registerManifestAddedBasicBlock(*RegionCheckTidBB);
3423 
3424       bool HasBroadcastValues = false;
3425       // Find escaping outputs from the guarded region to outside users and
3426       // broadcast their values to them.
3427       for (Instruction &I : *RegionStartBB) {
3428         SmallPtrSet<Instruction *, 4> OutsideUsers;
3429         for (User *Usr : I.users()) {
3430           Instruction &UsrI = *cast<Instruction>(Usr);
3431           if (UsrI.getParent() != RegionStartBB)
3432             OutsideUsers.insert(&UsrI);
3433         }
3434 
3435         if (OutsideUsers.empty())
3436           continue;
3437 
3438         HasBroadcastValues = true;
3439 
3440         // Emit a global variable in shared memory to store the broadcasted
3441         // value.
3442         auto *SharedMem = new GlobalVariable(
3443             M, I.getType(), /* IsConstant */ false,
3444             GlobalValue::InternalLinkage, UndefValue::get(I.getType()),
3445             sanitizeForGlobalName(
3446                 (I.getName() + ".guarded.output.alloc").str()),
3447             nullptr, GlobalValue::NotThreadLocal,
3448             static_cast<unsigned>(AddressSpace::Shared));
3449 
3450         // Emit a store instruction to update the value.
3451         new StoreInst(&I, SharedMem, RegionEndBB->getTerminator());
3452 
3453         LoadInst *LoadI = new LoadInst(I.getType(), SharedMem,
3454                                        I.getName() + ".guarded.output.load",
3455                                        RegionBarrierBB->getTerminator());
3456 
3457         // Emit a load instruction and replace uses of the output value.
3458         for (Instruction *UsrI : OutsideUsers)
3459           UsrI->replaceUsesOfWith(&I, LoadI);
3460       }
3461 
3462       auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3463 
3464       // Go to tid check BB in ParentBB.
3465       const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
3466       ParentBB->getTerminator()->eraseFromParent();
3467       OpenMPIRBuilder::LocationDescription Loc(
3468           InsertPointTy(ParentBB, ParentBB->end()), DL);
3469       OMPInfoCache.OMPBuilder.updateToLocation(Loc);
3470       uint32_t SrcLocStrSize;
3471       auto *SrcLocStr =
3472           OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3473       Value *Ident =
3474           OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3475       BranchInst::Create(RegionCheckTidBB, ParentBB)->setDebugLoc(DL);
3476 
3477       // Add check for Tid in RegionCheckTidBB
3478       RegionCheckTidBB->getTerminator()->eraseFromParent();
3479       OpenMPIRBuilder::LocationDescription LocRegionCheckTid(
3480           InsertPointTy(RegionCheckTidBB, RegionCheckTidBB->end()), DL);
3481       OMPInfoCache.OMPBuilder.updateToLocation(LocRegionCheckTid);
3482       FunctionCallee HardwareTidFn =
3483           OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3484               M, OMPRTL___kmpc_get_hardware_thread_id_in_block);
3485       CallInst *Tid =
3486           OMPInfoCache.OMPBuilder.Builder.CreateCall(HardwareTidFn, {});
3487       Tid->setDebugLoc(DL);
3488       OMPInfoCache.setCallingConvention(HardwareTidFn, Tid);
3489       Value *TidCheck = OMPInfoCache.OMPBuilder.Builder.CreateIsNull(Tid);
3490       OMPInfoCache.OMPBuilder.Builder
3491           .CreateCondBr(TidCheck, RegionStartBB, RegionBarrierBB)
3492           ->setDebugLoc(DL);
3493 
3494       // First barrier for synchronization, ensures main thread has updated
3495       // values.
3496       FunctionCallee BarrierFn =
3497           OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3498               M, OMPRTL___kmpc_barrier_simple_spmd);
3499       OMPInfoCache.OMPBuilder.updateToLocation(InsertPointTy(
3500           RegionBarrierBB, RegionBarrierBB->getFirstInsertionPt()));
3501       CallInst *Barrier =
3502           OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid});
3503       Barrier->setDebugLoc(DL);
3504       OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3505 
3506       // Second barrier ensures workers have read broadcast values.
3507       if (HasBroadcastValues) {
3508         CallInst *Barrier = CallInst::Create(BarrierFn, {Ident, Tid}, "",
3509                                              RegionBarrierBB->getTerminator());
3510         Barrier->setDebugLoc(DL);
3511         OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3512       }
3513     };
3514 
3515     auto &AllocSharedRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
3516     SmallPtrSet<BasicBlock *, 8> Visited;
3517     for (Instruction *GuardedI : SPMDCompatibilityTracker) {
3518       BasicBlock *BB = GuardedI->getParent();
3519       if (!Visited.insert(BB).second)
3520         continue;
3521 
3522       SmallVector<std::pair<Instruction *, Instruction *>> Reorders;
3523       Instruction *LastEffect = nullptr;
3524       BasicBlock::reverse_iterator IP = BB->rbegin(), IPEnd = BB->rend();
3525       while (++IP != IPEnd) {
3526         if (!IP->mayHaveSideEffects() && !IP->mayReadFromMemory())
3527           continue;
3528         Instruction *I = &*IP;
3529         if (OpenMPOpt::getCallIfRegularCall(*I, &AllocSharedRFI))
3530           continue;
3531         if (!I->user_empty() || !SPMDCompatibilityTracker.contains(I)) {
3532           LastEffect = nullptr;
3533           continue;
3534         }
3535         if (LastEffect)
3536           Reorders.push_back({I, LastEffect});
3537         LastEffect = &*IP;
3538       }
3539       for (auto &Reorder : Reorders)
3540         Reorder.first->moveBefore(Reorder.second);
3541     }
3542 
3543     SmallVector<std::pair<Instruction *, Instruction *>, 4> GuardedRegions;
3544 
3545     for (Instruction *GuardedI : SPMDCompatibilityTracker) {
3546       BasicBlock *BB = GuardedI->getParent();
3547       auto *CalleeAA = A.lookupAAFor<AAKernelInfo>(
3548           IRPosition::function(*GuardedI->getFunction()), nullptr,
3549           DepClassTy::NONE);
3550       assert(CalleeAA != nullptr && "Expected Callee AAKernelInfo");
3551       auto &CalleeAAFunction = *cast<AAKernelInfoFunction>(CalleeAA);
3552       // Continue if instruction is already guarded.
3553       if (CalleeAAFunction.getGuardedInstructions().contains(GuardedI))
3554         continue;
3555 
3556       Instruction *GuardedRegionStart = nullptr, *GuardedRegionEnd = nullptr;
3557       for (Instruction &I : *BB) {
3558         // If instruction I needs to be guarded update the guarded region
3559         // bounds.
3560         if (SPMDCompatibilityTracker.contains(&I)) {
3561           CalleeAAFunction.getGuardedInstructions().insert(&I);
3562           if (GuardedRegionStart)
3563             GuardedRegionEnd = &I;
3564           else
3565             GuardedRegionStart = GuardedRegionEnd = &I;
3566 
3567           continue;
3568         }
3569 
3570         // Instruction I does not need guarding, store
3571         // any region found and reset bounds.
3572         if (GuardedRegionStart) {
3573           GuardedRegions.push_back(
3574               std::make_pair(GuardedRegionStart, GuardedRegionEnd));
3575           GuardedRegionStart = nullptr;
3576           GuardedRegionEnd = nullptr;
3577         }
3578       }
3579     }
3580 
3581     for (auto &GR : GuardedRegions)
3582       CreateGuardedRegion(GR.first, GR.second);
3583 
3584     // Adjust the global exec mode flag that tells the runtime what mode this
3585     // kernel is executed in.
3586     assert(ExecModeVal == OMP_TGT_EXEC_MODE_GENERIC &&
3587            "Initially non-SPMD kernel has SPMD exec mode!");
3588     ExecMode->setInitializer(
3589         ConstantInt::get(ExecMode->getInitializer()->getType(),
3590                          ExecModeVal | OMP_TGT_EXEC_MODE_GENERIC_SPMD));
3591 
3592     // Next rewrite the init and deinit calls to indicate we use SPMD-mode now.
3593     const int InitModeArgNo = 1;
3594     const int DeinitModeArgNo = 1;
3595     const int InitUseStateMachineArgNo = 2;
3596     const int InitRequiresFullRuntimeArgNo = 3;
3597     const int DeinitRequiresFullRuntimeArgNo = 2;
3598 
3599     auto &Ctx = getAnchorValue().getContext();
3600     A.changeUseAfterManifest(
3601         KernelInitCB->getArgOperandUse(InitModeArgNo),
3602         *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
3603                                 OMP_TGT_EXEC_MODE_SPMD));
3604     A.changeUseAfterManifest(
3605         KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo),
3606         *ConstantInt::getBool(Ctx, false));
3607     A.changeUseAfterManifest(
3608         KernelDeinitCB->getArgOperandUse(DeinitModeArgNo),
3609         *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
3610                                 OMP_TGT_EXEC_MODE_SPMD));
3611     A.changeUseAfterManifest(
3612         KernelInitCB->getArgOperandUse(InitRequiresFullRuntimeArgNo),
3613         *ConstantInt::getBool(Ctx, false));
3614     A.changeUseAfterManifest(
3615         KernelDeinitCB->getArgOperandUse(DeinitRequiresFullRuntimeArgNo),
3616         *ConstantInt::getBool(Ctx, false));
3617 
3618     ++NumOpenMPTargetRegionKernelsSPMD;
3619 
3620     auto Remark = [&](OptimizationRemark OR) {
3621       return OR << "Transformed generic-mode kernel to SPMD-mode.";
3622     };
3623     A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP120", Remark);
3624     return true;
3625   };
3626 
3627   ChangeStatus buildCustomStateMachine(Attributor &A) {
3628     // If we have disabled state machine rewrites, don't make a custom one
3629     if (DisableOpenMPOptStateMachineRewrite)
3630       return ChangeStatus::UNCHANGED;
3631 
3632     // Don't rewrite the state machine if we are not in a valid state.
3633     if (!ReachedKnownParallelRegions.isValidState())
3634       return ChangeStatus::UNCHANGED;
3635 
3636     const int InitModeArgNo = 1;
3637     const int InitUseStateMachineArgNo = 2;
3638 
3639     // Check if the current configuration is non-SPMD and generic state machine.
3640     // If we already have SPMD mode or a custom state machine we do not need to
3641     // go any further. If it is anything but a constant something is weird and
3642     // we give up.
3643     ConstantInt *UseStateMachine = dyn_cast<ConstantInt>(
3644         KernelInitCB->getArgOperand(InitUseStateMachineArgNo));
3645     ConstantInt *Mode =
3646         dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo));
3647 
3648     // If we are stuck with generic mode, try to create a custom device (=GPU)
3649     // state machine which is specialized for the parallel regions that are
3650     // reachable by the kernel.
3651     if (!UseStateMachine || UseStateMachine->isZero() || !Mode ||
3652         (Mode->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD))
3653       return ChangeStatus::UNCHANGED;
3654 
3655     // If not SPMD mode, indicate we use a custom state machine now.
3656     auto &Ctx = getAnchorValue().getContext();
3657     auto *FalseVal = ConstantInt::getBool(Ctx, false);
3658     A.changeUseAfterManifest(
3659         KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal);
3660 
3661     // If we don't actually need a state machine we are done here. This can
3662     // happen if there simply are no parallel regions. In the resulting kernel
3663     // all worker threads will simply exit right away, leaving the main thread
3664     // to do the work alone.
3665     if (!mayContainParallelRegion()) {
3666       ++NumOpenMPTargetRegionKernelsWithoutStateMachine;
3667 
3668       auto Remark = [&](OptimizationRemark OR) {
3669         return OR << "Removing unused state machine from generic-mode kernel.";
3670       };
3671       A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP130", Remark);
3672 
3673       return ChangeStatus::CHANGED;
3674     }
3675 
3676     // Keep track in the statistics of our new shiny custom state machine.
3677     if (ReachedUnknownParallelRegions.empty()) {
3678       ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback;
3679 
3680       auto Remark = [&](OptimizationRemark OR) {
3681         return OR << "Rewriting generic-mode kernel with a customized state "
3682                      "machine.";
3683       };
3684       A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP131", Remark);
3685     } else {
3686       ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback;
3687 
3688       auto Remark = [&](OptimizationRemarkAnalysis OR) {
3689         return OR << "Generic-mode kernel is executed with a customized state "
3690                      "machine that requires a fallback.";
3691       };
3692       A.emitRemark<OptimizationRemarkAnalysis>(KernelInitCB, "OMP132", Remark);
3693 
3694       // Tell the user why we ended up with a fallback.
3695       for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) {
3696         if (!UnknownParallelRegionCB)
3697           continue;
3698         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
3699           return ORA << "Call may contain unknown parallel regions. Use "
3700                      << "`__attribute__((assume(\"omp_no_parallelism\")))` to "
3701                         "override.";
3702         };
3703         A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB,
3704                                                  "OMP133", Remark);
3705       }
3706     }
3707 
3708     // Create all the blocks:
3709     //
3710     //                       InitCB = __kmpc_target_init(...)
3711     //                       BlockHwSize =
3712     //                         __kmpc_get_hardware_num_threads_in_block();
3713     //                       WarpSize = __kmpc_get_warp_size();
3714     //                       BlockSize = BlockHwSize - WarpSize;
3715     // IsWorkerCheckBB:      bool IsWorker = InitCB != -1;
3716     //                       if (IsWorker) {
3717     //                         if (InitCB >= BlockSize) return;
3718     // SMBeginBB:               __kmpc_barrier_simple_generic(...);
3719     //                         void *WorkFn;
3720     //                         bool Active = __kmpc_kernel_parallel(&WorkFn);
3721     //                         if (!WorkFn) return;
3722     // SMIsActiveCheckBB:       if (Active) {
3723     // SMIfCascadeCurrentBB:      if      (WorkFn == <ParFn0>)
3724     //                              ParFn0(...);
3725     // SMIfCascadeCurrentBB:      else if (WorkFn == <ParFn1>)
3726     //                              ParFn1(...);
3727     //                            ...
3728     // SMIfCascadeCurrentBB:      else
3729     //                              ((WorkFnTy*)WorkFn)(...);
3730     // SMEndParallelBB:           __kmpc_kernel_end_parallel(...);
3731     //                          }
3732     // SMDoneBB:                __kmpc_barrier_simple_generic(...);
3733     //                          goto SMBeginBB;
3734     //                       }
3735     // UserCodeEntryBB:      // user code
3736     //                       __kmpc_target_deinit(...)
3737     //
3738     Function *Kernel = getAssociatedFunction();
3739     assert(Kernel && "Expected an associated function!");
3740 
3741     BasicBlock *InitBB = KernelInitCB->getParent();
3742     BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock(
3743         KernelInitCB->getNextNode(), "thread.user_code.check");
3744     BasicBlock *IsWorkerCheckBB =
3745         BasicBlock::Create(Ctx, "is_worker_check", Kernel, UserCodeEntryBB);
3746     BasicBlock *StateMachineBeginBB = BasicBlock::Create(
3747         Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB);
3748     BasicBlock *StateMachineFinishedBB = BasicBlock::Create(
3749         Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB);
3750     BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create(
3751         Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB);
3752     BasicBlock *StateMachineIfCascadeCurrentBB =
3753         BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check",
3754                            Kernel, UserCodeEntryBB);
3755     BasicBlock *StateMachineEndParallelBB =
3756         BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end",
3757                            Kernel, UserCodeEntryBB);
3758     BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create(
3759         Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB);
3760     A.registerManifestAddedBasicBlock(*InitBB);
3761     A.registerManifestAddedBasicBlock(*UserCodeEntryBB);
3762     A.registerManifestAddedBasicBlock(*IsWorkerCheckBB);
3763     A.registerManifestAddedBasicBlock(*StateMachineBeginBB);
3764     A.registerManifestAddedBasicBlock(*StateMachineFinishedBB);
3765     A.registerManifestAddedBasicBlock(*StateMachineIsActiveCheckBB);
3766     A.registerManifestAddedBasicBlock(*StateMachineIfCascadeCurrentBB);
3767     A.registerManifestAddedBasicBlock(*StateMachineEndParallelBB);
3768     A.registerManifestAddedBasicBlock(*StateMachineDoneBarrierBB);
3769 
3770     const DebugLoc &DLoc = KernelInitCB->getDebugLoc();
3771     ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc);
3772     InitBB->getTerminator()->eraseFromParent();
3773 
3774     Instruction *IsWorker =
3775         ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB,
3776                          ConstantInt::get(KernelInitCB->getType(), -1),
3777                          "thread.is_worker", InitBB);
3778     IsWorker->setDebugLoc(DLoc);
3779     BranchInst::Create(IsWorkerCheckBB, UserCodeEntryBB, IsWorker, InitBB);
3780 
3781     Module &M = *Kernel->getParent();
3782     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3783     FunctionCallee BlockHwSizeFn =
3784         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3785             M, OMPRTL___kmpc_get_hardware_num_threads_in_block);
3786     FunctionCallee WarpSizeFn =
3787         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3788             M, OMPRTL___kmpc_get_warp_size);
3789     CallInst *BlockHwSize =
3790         CallInst::Create(BlockHwSizeFn, "block.hw_size", IsWorkerCheckBB);
3791     OMPInfoCache.setCallingConvention(BlockHwSizeFn, BlockHwSize);
3792     BlockHwSize->setDebugLoc(DLoc);
3793     CallInst *WarpSize =
3794         CallInst::Create(WarpSizeFn, "warp.size", IsWorkerCheckBB);
3795     OMPInfoCache.setCallingConvention(WarpSizeFn, WarpSize);
3796     WarpSize->setDebugLoc(DLoc);
3797     Instruction *BlockSize = BinaryOperator::CreateSub(
3798         BlockHwSize, WarpSize, "block.size", IsWorkerCheckBB);
3799     BlockSize->setDebugLoc(DLoc);
3800     Instruction *IsMainOrWorker = ICmpInst::Create(
3801         ICmpInst::ICmp, llvm::CmpInst::ICMP_SLT, KernelInitCB, BlockSize,
3802         "thread.is_main_or_worker", IsWorkerCheckBB);
3803     IsMainOrWorker->setDebugLoc(DLoc);
3804     BranchInst::Create(StateMachineBeginBB, StateMachineFinishedBB,
3805                        IsMainOrWorker, IsWorkerCheckBB);
3806 
3807     // Create local storage for the work function pointer.
3808     const DataLayout &DL = M.getDataLayout();
3809     Type *VoidPtrTy = Type::getInt8PtrTy(Ctx);
3810     Instruction *WorkFnAI =
3811         new AllocaInst(VoidPtrTy, DL.getAllocaAddrSpace(), nullptr,
3812                        "worker.work_fn.addr", &Kernel->getEntryBlock().front());
3813     WorkFnAI->setDebugLoc(DLoc);
3814 
3815     OMPInfoCache.OMPBuilder.updateToLocation(
3816         OpenMPIRBuilder::LocationDescription(
3817             IRBuilder<>::InsertPoint(StateMachineBeginBB,
3818                                      StateMachineBeginBB->end()),
3819             DLoc));
3820 
3821     Value *Ident = KernelInitCB->getArgOperand(0);
3822     Value *GTid = KernelInitCB;
3823 
3824     FunctionCallee BarrierFn =
3825         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3826             M, OMPRTL___kmpc_barrier_simple_generic);
3827     CallInst *Barrier =
3828         CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB);
3829     OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3830     Barrier->setDebugLoc(DLoc);
3831 
3832     if (WorkFnAI->getType()->getPointerAddressSpace() !=
3833         (unsigned int)AddressSpace::Generic) {
3834       WorkFnAI = new AddrSpaceCastInst(
3835           WorkFnAI,
3836           PointerType::getWithSamePointeeType(
3837               cast<PointerType>(WorkFnAI->getType()),
3838               (unsigned int)AddressSpace::Generic),
3839           WorkFnAI->getName() + ".generic", StateMachineBeginBB);
3840       WorkFnAI->setDebugLoc(DLoc);
3841     }
3842 
3843     FunctionCallee KernelParallelFn =
3844         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3845             M, OMPRTL___kmpc_kernel_parallel);
3846     CallInst *IsActiveWorker = CallInst::Create(
3847         KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB);
3848     OMPInfoCache.setCallingConvention(KernelParallelFn, IsActiveWorker);
3849     IsActiveWorker->setDebugLoc(DLoc);
3850     Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn",
3851                                        StateMachineBeginBB);
3852     WorkFn->setDebugLoc(DLoc);
3853 
3854     FunctionType *ParallelRegionFnTy = FunctionType::get(
3855         Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)},
3856         false);
3857     Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
3858         WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast",
3859         StateMachineBeginBB);
3860 
3861     Instruction *IsDone =
3862         ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn,
3863                          Constant::getNullValue(VoidPtrTy), "worker.is_done",
3864                          StateMachineBeginBB);
3865     IsDone->setDebugLoc(DLoc);
3866     BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB,
3867                        IsDone, StateMachineBeginBB)
3868         ->setDebugLoc(DLoc);
3869 
3870     BranchInst::Create(StateMachineIfCascadeCurrentBB,
3871                        StateMachineDoneBarrierBB, IsActiveWorker,
3872                        StateMachineIsActiveCheckBB)
3873         ->setDebugLoc(DLoc);
3874 
3875     Value *ZeroArg =
3876         Constant::getNullValue(ParallelRegionFnTy->getParamType(0));
3877 
3878     // Now that we have most of the CFG skeleton it is time for the if-cascade
3879     // that checks the function pointer we got from the runtime against the
3880     // parallel regions we expect, if there are any.
3881     for (int I = 0, E = ReachedKnownParallelRegions.size(); I < E; ++I) {
3882       auto *ParallelRegion = ReachedKnownParallelRegions[I];
3883       BasicBlock *PRExecuteBB = BasicBlock::Create(
3884           Ctx, "worker_state_machine.parallel_region.execute", Kernel,
3885           StateMachineEndParallelBB);
3886       CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB)
3887           ->setDebugLoc(DLoc);
3888       BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB)
3889           ->setDebugLoc(DLoc);
3890 
3891       BasicBlock *PRNextBB =
3892           BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check",
3893                              Kernel, StateMachineEndParallelBB);
3894 
3895       // Check if we need to compare the pointer at all or if we can just
3896       // call the parallel region function.
3897       Value *IsPR;
3898       if (I + 1 < E || !ReachedUnknownParallelRegions.empty()) {
3899         Instruction *CmpI = ICmpInst::Create(
3900             ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion,
3901             "worker.check_parallel_region", StateMachineIfCascadeCurrentBB);
3902         CmpI->setDebugLoc(DLoc);
3903         IsPR = CmpI;
3904       } else {
3905         IsPR = ConstantInt::getTrue(Ctx);
3906       }
3907 
3908       BranchInst::Create(PRExecuteBB, PRNextBB, IsPR,
3909                          StateMachineIfCascadeCurrentBB)
3910           ->setDebugLoc(DLoc);
3911       StateMachineIfCascadeCurrentBB = PRNextBB;
3912     }
3913 
3914     // At the end of the if-cascade we place the indirect function pointer call
3915     // in case we might need it, that is if there can be parallel regions we
3916     // have not handled in the if-cascade above.
3917     if (!ReachedUnknownParallelRegions.empty()) {
3918       StateMachineIfCascadeCurrentBB->setName(
3919           "worker_state_machine.parallel_region.fallback.execute");
3920       CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "",
3921                        StateMachineIfCascadeCurrentBB)
3922           ->setDebugLoc(DLoc);
3923     }
3924     BranchInst::Create(StateMachineEndParallelBB,
3925                        StateMachineIfCascadeCurrentBB)
3926         ->setDebugLoc(DLoc);
3927 
3928     FunctionCallee EndParallelFn =
3929         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3930             M, OMPRTL___kmpc_kernel_end_parallel);
3931     CallInst *EndParallel =
3932         CallInst::Create(EndParallelFn, {}, "", StateMachineEndParallelBB);
3933     OMPInfoCache.setCallingConvention(EndParallelFn, EndParallel);
3934     EndParallel->setDebugLoc(DLoc);
3935     BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB)
3936         ->setDebugLoc(DLoc);
3937 
3938     CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB)
3939         ->setDebugLoc(DLoc);
3940     BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB)
3941         ->setDebugLoc(DLoc);
3942 
3943     return ChangeStatus::CHANGED;
3944   }
3945 
3946   /// Fixpoint iteration update function. Will be called every time a dependence
3947   /// changed its state (and in the beginning).
3948   ChangeStatus updateImpl(Attributor &A) override {
3949     KernelInfoState StateBefore = getState();
3950 
3951     // Callback to check a read/write instruction.
3952     auto CheckRWInst = [&](Instruction &I) {
3953       // We handle calls later.
3954       if (isa<CallBase>(I))
3955         return true;
3956       // We only care about write effects.
3957       if (!I.mayWriteToMemory())
3958         return true;
3959       if (auto *SI = dyn_cast<StoreInst>(&I)) {
3960         SmallVector<const Value *> Objects;
3961         getUnderlyingObjects(SI->getPointerOperand(), Objects);
3962         if (llvm::all_of(Objects,
3963                          [](const Value *Obj) { return isa<AllocaInst>(Obj); }))
3964           return true;
3965         // Check for AAHeapToStack moved objects which must not be guarded.
3966         auto &HS = A.getAAFor<AAHeapToStack>(
3967             *this, IRPosition::function(*I.getFunction()),
3968             DepClassTy::OPTIONAL);
3969         if (llvm::all_of(Objects, [&HS](const Value *Obj) {
3970               auto *CB = dyn_cast<CallBase>(Obj);
3971               if (!CB)
3972                 return false;
3973               return HS.isAssumedHeapToStack(*CB);
3974             })) {
3975           return true;
3976         }
3977       }
3978 
3979       // Insert instruction that needs guarding.
3980       SPMDCompatibilityTracker.insert(&I);
3981       return true;
3982     };
3983 
3984     bool UsedAssumedInformationInCheckRWInst = false;
3985     if (!SPMDCompatibilityTracker.isAtFixpoint())
3986       if (!A.checkForAllReadWriteInstructions(
3987               CheckRWInst, *this, UsedAssumedInformationInCheckRWInst))
3988         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
3989 
3990     bool UsedAssumedInformationFromReachingKernels = false;
3991     if (!IsKernelEntry) {
3992       updateParallelLevels(A);
3993 
3994       bool AllReachingKernelsKnown = true;
3995       updateReachingKernelEntries(A, AllReachingKernelsKnown);
3996       UsedAssumedInformationFromReachingKernels = !AllReachingKernelsKnown;
3997 
3998       if (!ParallelLevels.isValidState())
3999         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4000       else if (!ReachingKernelEntries.isValidState())
4001         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4002       else if (!SPMDCompatibilityTracker.empty()) {
4003         // Check if all reaching kernels agree on the mode as we can otherwise
4004         // not guard instructions. We might not be sure about the mode so we
4005         // we cannot fix the internal spmd-zation state either.
4006         int SPMD = 0, Generic = 0;
4007         for (auto *Kernel : ReachingKernelEntries) {
4008           auto &CBAA = A.getAAFor<AAKernelInfo>(
4009               *this, IRPosition::function(*Kernel), DepClassTy::OPTIONAL);
4010           if (CBAA.SPMDCompatibilityTracker.isValidState() &&
4011               CBAA.SPMDCompatibilityTracker.isAssumed())
4012             ++SPMD;
4013           else
4014             ++Generic;
4015           if (!CBAA.SPMDCompatibilityTracker.isAtFixpoint())
4016             UsedAssumedInformationFromReachingKernels = true;
4017         }
4018         if (SPMD != 0 && Generic != 0)
4019           SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4020       }
4021     }
4022 
4023     // Callback to check a call instruction.
4024     bool AllParallelRegionStatesWereFixed = true;
4025     bool AllSPMDStatesWereFixed = true;
4026     auto CheckCallInst = [&](Instruction &I) {
4027       auto &CB = cast<CallBase>(I);
4028       auto &CBAA = A.getAAFor<AAKernelInfo>(
4029           *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
4030       getState() ^= CBAA.getState();
4031       AllSPMDStatesWereFixed &= CBAA.SPMDCompatibilityTracker.isAtFixpoint();
4032       AllParallelRegionStatesWereFixed &=
4033           CBAA.ReachedKnownParallelRegions.isAtFixpoint();
4034       AllParallelRegionStatesWereFixed &=
4035           CBAA.ReachedUnknownParallelRegions.isAtFixpoint();
4036       return true;
4037     };
4038 
4039     bool UsedAssumedInformationInCheckCallInst = false;
4040     if (!A.checkForAllCallLikeInstructions(
4041             CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) {
4042       LLVM_DEBUG(dbgs() << TAG
4043                         << "Failed to visit all call-like instructions!\n";);
4044       return indicatePessimisticFixpoint();
4045     }
4046 
4047     // If we haven't used any assumed information for the reached parallel
4048     // region states we can fix it.
4049     if (!UsedAssumedInformationInCheckCallInst &&
4050         AllParallelRegionStatesWereFixed) {
4051       ReachedKnownParallelRegions.indicateOptimisticFixpoint();
4052       ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
4053     }
4054 
4055     // If we are sure there are no parallel regions in the kernel we do not
4056     // want SPMD mode.
4057     if (IsKernelEntry && ReachedUnknownParallelRegions.isAtFixpoint() &&
4058         ReachedKnownParallelRegions.isAtFixpoint() &&
4059         ReachedUnknownParallelRegions.isValidState() &&
4060         ReachedKnownParallelRegions.isValidState() &&
4061         !mayContainParallelRegion())
4062       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4063 
4064     // If we haven't used any assumed information for the SPMD state we can fix
4065     // it.
4066     if (!UsedAssumedInformationInCheckRWInst &&
4067         !UsedAssumedInformationInCheckCallInst &&
4068         !UsedAssumedInformationFromReachingKernels && AllSPMDStatesWereFixed)
4069       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
4070 
4071     return StateBefore == getState() ? ChangeStatus::UNCHANGED
4072                                      : ChangeStatus::CHANGED;
4073   }
4074 
4075 private:
4076   /// Update info regarding reaching kernels.
4077   void updateReachingKernelEntries(Attributor &A,
4078                                    bool &AllReachingKernelsKnown) {
4079     auto PredCallSite = [&](AbstractCallSite ACS) {
4080       Function *Caller = ACS.getInstruction()->getFunction();
4081 
4082       assert(Caller && "Caller is nullptr");
4083 
4084       auto &CAA = A.getOrCreateAAFor<AAKernelInfo>(
4085           IRPosition::function(*Caller), this, DepClassTy::REQUIRED);
4086       if (CAA.ReachingKernelEntries.isValidState()) {
4087         ReachingKernelEntries ^= CAA.ReachingKernelEntries;
4088         return true;
4089       }
4090 
4091       // We lost track of the caller of the associated function, any kernel
4092       // could reach now.
4093       ReachingKernelEntries.indicatePessimisticFixpoint();
4094 
4095       return true;
4096     };
4097 
4098     if (!A.checkForAllCallSites(PredCallSite, *this,
4099                                 true /* RequireAllCallSites */,
4100                                 AllReachingKernelsKnown))
4101       ReachingKernelEntries.indicatePessimisticFixpoint();
4102   }
4103 
4104   /// Update info regarding parallel levels.
4105   void updateParallelLevels(Attributor &A) {
4106     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4107     OMPInformationCache::RuntimeFunctionInfo &Parallel51RFI =
4108         OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
4109 
4110     auto PredCallSite = [&](AbstractCallSite ACS) {
4111       Function *Caller = ACS.getInstruction()->getFunction();
4112 
4113       assert(Caller && "Caller is nullptr");
4114 
4115       auto &CAA =
4116           A.getOrCreateAAFor<AAKernelInfo>(IRPosition::function(*Caller));
4117       if (CAA.ParallelLevels.isValidState()) {
4118         // Any function that is called by `__kmpc_parallel_51` will not be
4119         // folded as the parallel level in the function is updated. In order to
4120         // get it right, all the analysis would depend on the implentation. That
4121         // said, if in the future any change to the implementation, the analysis
4122         // could be wrong. As a consequence, we are just conservative here.
4123         if (Caller == Parallel51RFI.Declaration) {
4124           ParallelLevels.indicatePessimisticFixpoint();
4125           return true;
4126         }
4127 
4128         ParallelLevels ^= CAA.ParallelLevels;
4129 
4130         return true;
4131       }
4132 
4133       // We lost track of the caller of the associated function, any kernel
4134       // could reach now.
4135       ParallelLevels.indicatePessimisticFixpoint();
4136 
4137       return true;
4138     };
4139 
4140     bool AllCallSitesKnown = true;
4141     if (!A.checkForAllCallSites(PredCallSite, *this,
4142                                 true /* RequireAllCallSites */,
4143                                 AllCallSitesKnown))
4144       ParallelLevels.indicatePessimisticFixpoint();
4145   }
4146 };
4147 
4148 /// The call site kernel info abstract attribute, basically, what can we say
4149 /// about a call site with regards to the KernelInfoState. For now this simply
4150 /// forwards the information from the callee.
4151 struct AAKernelInfoCallSite : AAKernelInfo {
4152   AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A)
4153       : AAKernelInfo(IRP, A) {}
4154 
4155   /// See AbstractAttribute::initialize(...).
4156   void initialize(Attributor &A) override {
4157     AAKernelInfo::initialize(A);
4158 
4159     CallBase &CB = cast<CallBase>(getAssociatedValue());
4160     Function *Callee = getAssociatedFunction();
4161 
4162     auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
4163         *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
4164 
4165     // Check for SPMD-mode assumptions.
4166     if (AssumptionAA.hasAssumption("ompx_spmd_amenable")) {
4167       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
4168       indicateOptimisticFixpoint();
4169     }
4170 
4171     // First weed out calls we do not care about, that is readonly/readnone
4172     // calls, intrinsics, and "no_openmp" calls. Neither of these can reach a
4173     // parallel region or anything else we are looking for.
4174     if (!CB.mayWriteToMemory() || isa<IntrinsicInst>(CB)) {
4175       indicateOptimisticFixpoint();
4176       return;
4177     }
4178 
4179     // Next we check if we know the callee. If it is a known OpenMP function
4180     // we will handle them explicitly in the switch below. If it is not, we
4181     // will use an AAKernelInfo object on the callee to gather information and
4182     // merge that into the current state. The latter happens in the updateImpl.
4183     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4184     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
4185     if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
4186       // Unknown caller or declarations are not analyzable, we give up.
4187       if (!Callee || !A.isFunctionIPOAmendable(*Callee)) {
4188 
4189         // Unknown callees might contain parallel regions, except if they have
4190         // an appropriate assumption attached.
4191         if (!(AssumptionAA.hasAssumption("omp_no_openmp") ||
4192               AssumptionAA.hasAssumption("omp_no_parallelism")))
4193           ReachedUnknownParallelRegions.insert(&CB);
4194 
4195         // If SPMDCompatibilityTracker is not fixed, we need to give up on the
4196         // idea we can run something unknown in SPMD-mode.
4197         if (!SPMDCompatibilityTracker.isAtFixpoint()) {
4198           SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4199           SPMDCompatibilityTracker.insert(&CB);
4200         }
4201 
4202         // We have updated the state for this unknown call properly, there won't
4203         // be any change so we indicate a fixpoint.
4204         indicateOptimisticFixpoint();
4205       }
4206       // If the callee is known and can be used in IPO, we will update the state
4207       // based on the callee state in updateImpl.
4208       return;
4209     }
4210 
4211     const unsigned int WrapperFunctionArgNo = 6;
4212     RuntimeFunction RF = It->getSecond();
4213     switch (RF) {
4214     // All the functions we know are compatible with SPMD mode.
4215     case OMPRTL___kmpc_is_spmd_exec_mode:
4216     case OMPRTL___kmpc_distribute_static_fini:
4217     case OMPRTL___kmpc_for_static_fini:
4218     case OMPRTL___kmpc_global_thread_num:
4219     case OMPRTL___kmpc_get_hardware_num_threads_in_block:
4220     case OMPRTL___kmpc_get_hardware_num_blocks:
4221     case OMPRTL___kmpc_single:
4222     case OMPRTL___kmpc_end_single:
4223     case OMPRTL___kmpc_master:
4224     case OMPRTL___kmpc_end_master:
4225     case OMPRTL___kmpc_barrier:
4226     case OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2:
4227     case OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2:
4228     case OMPRTL___kmpc_nvptx_end_reduce_nowait:
4229       break;
4230     case OMPRTL___kmpc_distribute_static_init_4:
4231     case OMPRTL___kmpc_distribute_static_init_4u:
4232     case OMPRTL___kmpc_distribute_static_init_8:
4233     case OMPRTL___kmpc_distribute_static_init_8u:
4234     case OMPRTL___kmpc_for_static_init_4:
4235     case OMPRTL___kmpc_for_static_init_4u:
4236     case OMPRTL___kmpc_for_static_init_8:
4237     case OMPRTL___kmpc_for_static_init_8u: {
4238       // Check the schedule and allow static schedule in SPMD mode.
4239       unsigned ScheduleArgOpNo = 2;
4240       auto *ScheduleTypeCI =
4241           dyn_cast<ConstantInt>(CB.getArgOperand(ScheduleArgOpNo));
4242       unsigned ScheduleTypeVal =
4243           ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0;
4244       switch (OMPScheduleType(ScheduleTypeVal)) {
4245       case OMPScheduleType::Static:
4246       case OMPScheduleType::StaticChunked:
4247       case OMPScheduleType::Distribute:
4248       case OMPScheduleType::DistributeChunked:
4249         break;
4250       default:
4251         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4252         SPMDCompatibilityTracker.insert(&CB);
4253         break;
4254       };
4255     } break;
4256     case OMPRTL___kmpc_target_init:
4257       KernelInitCB = &CB;
4258       break;
4259     case OMPRTL___kmpc_target_deinit:
4260       KernelDeinitCB = &CB;
4261       break;
4262     case OMPRTL___kmpc_parallel_51:
4263       if (auto *ParallelRegion = dyn_cast<Function>(
4264               CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) {
4265         ReachedKnownParallelRegions.insert(ParallelRegion);
4266         break;
4267       }
4268       // The condition above should usually get the parallel region function
4269       // pointer and record it. In the off chance it doesn't we assume the
4270       // worst.
4271       ReachedUnknownParallelRegions.insert(&CB);
4272       break;
4273     case OMPRTL___kmpc_omp_task:
4274       // We do not look into tasks right now, just give up.
4275       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4276       SPMDCompatibilityTracker.insert(&CB);
4277       ReachedUnknownParallelRegions.insert(&CB);
4278       break;
4279     case OMPRTL___kmpc_alloc_shared:
4280     case OMPRTL___kmpc_free_shared:
4281       // Return without setting a fixpoint, to be resolved in updateImpl.
4282       return;
4283     default:
4284       // Unknown OpenMP runtime calls cannot be executed in SPMD-mode,
4285       // generally. However, they do not hide parallel regions.
4286       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4287       SPMDCompatibilityTracker.insert(&CB);
4288       break;
4289     }
4290     // All other OpenMP runtime calls will not reach parallel regions so they
4291     // can be safely ignored for now. Since it is a known OpenMP runtime call we
4292     // have now modeled all effects and there is no need for any update.
4293     indicateOptimisticFixpoint();
4294   }
4295 
4296   ChangeStatus updateImpl(Attributor &A) override {
4297     // TODO: Once we have call site specific value information we can provide
4298     //       call site specific liveness information and then it makes
4299     //       sense to specialize attributes for call sites arguments instead of
4300     //       redirecting requests to the callee argument.
4301     Function *F = getAssociatedFunction();
4302 
4303     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4304     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(F);
4305 
4306     // If F is not a runtime function, propagate the AAKernelInfo of the callee.
4307     if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
4308       const IRPosition &FnPos = IRPosition::function(*F);
4309       auto &FnAA = A.getAAFor<AAKernelInfo>(*this, FnPos, DepClassTy::REQUIRED);
4310       if (getState() == FnAA.getState())
4311         return ChangeStatus::UNCHANGED;
4312       getState() = FnAA.getState();
4313       return ChangeStatus::CHANGED;
4314     }
4315 
4316     // F is a runtime function that allocates or frees memory, check
4317     // AAHeapToStack and AAHeapToShared.
4318     KernelInfoState StateBefore = getState();
4319     assert((It->getSecond() == OMPRTL___kmpc_alloc_shared ||
4320             It->getSecond() == OMPRTL___kmpc_free_shared) &&
4321            "Expected a __kmpc_alloc_shared or __kmpc_free_shared runtime call");
4322 
4323     CallBase &CB = cast<CallBase>(getAssociatedValue());
4324 
4325     auto &HeapToStackAA = A.getAAFor<AAHeapToStack>(
4326         *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
4327     auto &HeapToSharedAA = A.getAAFor<AAHeapToShared>(
4328         *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
4329 
4330     RuntimeFunction RF = It->getSecond();
4331 
4332     switch (RF) {
4333     // If neither HeapToStack nor HeapToShared assume the call is removed,
4334     // assume SPMD incompatibility.
4335     case OMPRTL___kmpc_alloc_shared:
4336       if (!HeapToStackAA.isAssumedHeapToStack(CB) &&
4337           !HeapToSharedAA.isAssumedHeapToShared(CB))
4338         SPMDCompatibilityTracker.insert(&CB);
4339       break;
4340     case OMPRTL___kmpc_free_shared:
4341       if (!HeapToStackAA.isAssumedHeapToStackRemovedFree(CB) &&
4342           !HeapToSharedAA.isAssumedHeapToSharedRemovedFree(CB))
4343         SPMDCompatibilityTracker.insert(&CB);
4344       break;
4345     default:
4346       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4347       SPMDCompatibilityTracker.insert(&CB);
4348     }
4349 
4350     return StateBefore == getState() ? ChangeStatus::UNCHANGED
4351                                      : ChangeStatus::CHANGED;
4352   }
4353 };
4354 
4355 struct AAFoldRuntimeCall
4356     : public StateWrapper<BooleanState, AbstractAttribute> {
4357   using Base = StateWrapper<BooleanState, AbstractAttribute>;
4358 
4359   AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
4360 
4361   /// Statistics are tracked as part of manifest for now.
4362   void trackStatistics() const override {}
4363 
4364   /// Create an abstract attribute biew for the position \p IRP.
4365   static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP,
4366                                               Attributor &A);
4367 
4368   /// See AbstractAttribute::getName()
4369   const std::string getName() const override { return "AAFoldRuntimeCall"; }
4370 
4371   /// See AbstractAttribute::getIdAddr()
4372   const char *getIdAddr() const override { return &ID; }
4373 
4374   /// This function should return true if the type of the \p AA is
4375   /// AAFoldRuntimeCall
4376   static bool classof(const AbstractAttribute *AA) {
4377     return (AA->getIdAddr() == &ID);
4378   }
4379 
4380   static const char ID;
4381 };
4382 
4383 struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
4384   AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A)
4385       : AAFoldRuntimeCall(IRP, A) {}
4386 
4387   /// See AbstractAttribute::getAsStr()
4388   const std::string getAsStr() const override {
4389     if (!isValidState())
4390       return "<invalid>";
4391 
4392     std::string Str("simplified value: ");
4393 
4394     if (!SimplifiedValue.hasValue())
4395       return Str + std::string("none");
4396 
4397     if (!SimplifiedValue.getValue())
4398       return Str + std::string("nullptr");
4399 
4400     if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.getValue()))
4401       return Str + std::to_string(CI->getSExtValue());
4402 
4403     return Str + std::string("unknown");
4404   }
4405 
4406   void initialize(Attributor &A) override {
4407     if (DisableOpenMPOptFolding)
4408       indicatePessimisticFixpoint();
4409 
4410     Function *Callee = getAssociatedFunction();
4411 
4412     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4413     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
4414     assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() &&
4415            "Expected a known OpenMP runtime function");
4416 
4417     RFKind = It->getSecond();
4418 
4419     CallBase &CB = cast<CallBase>(getAssociatedValue());
4420     A.registerSimplificationCallback(
4421         IRPosition::callsite_returned(CB),
4422         [&](const IRPosition &IRP, const AbstractAttribute *AA,
4423             bool &UsedAssumedInformation) -> Optional<Value *> {
4424           assert((isValidState() || (SimplifiedValue.hasValue() &&
4425                                      SimplifiedValue.getValue() == nullptr)) &&
4426                  "Unexpected invalid state!");
4427 
4428           if (!isAtFixpoint()) {
4429             UsedAssumedInformation = true;
4430             if (AA)
4431               A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
4432           }
4433           return SimplifiedValue;
4434         });
4435   }
4436 
4437   ChangeStatus updateImpl(Attributor &A) override {
4438     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4439     switch (RFKind) {
4440     case OMPRTL___kmpc_is_spmd_exec_mode:
4441       Changed |= foldIsSPMDExecMode(A);
4442       break;
4443     case OMPRTL___kmpc_is_generic_main_thread_id:
4444       Changed |= foldIsGenericMainThread(A);
4445       break;
4446     case OMPRTL___kmpc_parallel_level:
4447       Changed |= foldParallelLevel(A);
4448       break;
4449     case OMPRTL___kmpc_get_hardware_num_threads_in_block:
4450       Changed = Changed | foldKernelFnAttribute(A, "omp_target_thread_limit");
4451       break;
4452     case OMPRTL___kmpc_get_hardware_num_blocks:
4453       Changed = Changed | foldKernelFnAttribute(A, "omp_target_num_teams");
4454       break;
4455     default:
4456       llvm_unreachable("Unhandled OpenMP runtime function!");
4457     }
4458 
4459     return Changed;
4460   }
4461 
4462   ChangeStatus manifest(Attributor &A) override {
4463     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4464 
4465     if (SimplifiedValue.hasValue() && SimplifiedValue.getValue()) {
4466       Instruction &I = *getCtxI();
4467       A.changeValueAfterManifest(I, **SimplifiedValue);
4468       A.deleteAfterManifest(I);
4469 
4470       CallBase *CB = dyn_cast<CallBase>(&I);
4471       auto Remark = [&](OptimizationRemark OR) {
4472         if (auto *C = dyn_cast<ConstantInt>(*SimplifiedValue))
4473           return OR << "Replacing OpenMP runtime call "
4474                     << CB->getCalledFunction()->getName() << " with "
4475                     << ore::NV("FoldedValue", C->getZExtValue()) << ".";
4476         return OR << "Replacing OpenMP runtime call "
4477                   << CB->getCalledFunction()->getName() << ".";
4478       };
4479 
4480       if (CB && EnableVerboseRemarks)
4481         A.emitRemark<OptimizationRemark>(CB, "OMP180", Remark);
4482 
4483       LLVM_DEBUG(dbgs() << TAG << "Replacing runtime call: " << I << " with "
4484                         << **SimplifiedValue << "\n");
4485 
4486       Changed = ChangeStatus::CHANGED;
4487     }
4488 
4489     return Changed;
4490   }
4491 
4492   ChangeStatus indicatePessimisticFixpoint() override {
4493     SimplifiedValue = nullptr;
4494     return AAFoldRuntimeCall::indicatePessimisticFixpoint();
4495   }
4496 
4497 private:
4498   /// Fold __kmpc_is_spmd_exec_mode into a constant if possible.
4499   ChangeStatus foldIsSPMDExecMode(Attributor &A) {
4500     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4501 
4502     unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0;
4503     unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0;
4504     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4505         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4506 
4507     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4508       return indicatePessimisticFixpoint();
4509 
4510     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4511       auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K),
4512                                           DepClassTy::REQUIRED);
4513 
4514       if (!AA.isValidState()) {
4515         SimplifiedValue = nullptr;
4516         return indicatePessimisticFixpoint();
4517       }
4518 
4519       if (AA.SPMDCompatibilityTracker.isAssumed()) {
4520         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4521           ++KnownSPMDCount;
4522         else
4523           ++AssumedSPMDCount;
4524       } else {
4525         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4526           ++KnownNonSPMDCount;
4527         else
4528           ++AssumedNonSPMDCount;
4529       }
4530     }
4531 
4532     if ((AssumedSPMDCount + KnownSPMDCount) &&
4533         (AssumedNonSPMDCount + KnownNonSPMDCount))
4534       return indicatePessimisticFixpoint();
4535 
4536     auto &Ctx = getAnchorValue().getContext();
4537     if (KnownSPMDCount || AssumedSPMDCount) {
4538       assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 &&
4539              "Expected only SPMD kernels!");
4540       // All reaching kernels are in SPMD mode. Update all function calls to
4541       // __kmpc_is_spmd_exec_mode to 1.
4542       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true);
4543     } else if (KnownNonSPMDCount || AssumedNonSPMDCount) {
4544       assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 &&
4545              "Expected only non-SPMD kernels!");
4546       // All reaching kernels are in non-SPMD mode. Update all function
4547       // calls to __kmpc_is_spmd_exec_mode to 0.
4548       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false);
4549     } else {
4550       // We have empty reaching kernels, therefore we cannot tell if the
4551       // associated call site can be folded. At this moment, SimplifiedValue
4552       // must be none.
4553       assert(!SimplifiedValue.hasValue() && "SimplifiedValue should be none");
4554     }
4555 
4556     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4557                                                     : ChangeStatus::CHANGED;
4558   }
4559 
4560   /// Fold __kmpc_is_generic_main_thread_id into a constant if possible.
4561   ChangeStatus foldIsGenericMainThread(Attributor &A) {
4562     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4563 
4564     CallBase &CB = cast<CallBase>(getAssociatedValue());
4565     Function *F = CB.getFunction();
4566     const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>(
4567         *this, IRPosition::function(*F), DepClassTy::REQUIRED);
4568 
4569     if (!ExecutionDomainAA.isValidState())
4570       return indicatePessimisticFixpoint();
4571 
4572     auto &Ctx = getAnchorValue().getContext();
4573     if (ExecutionDomainAA.isExecutedByInitialThreadOnly(CB))
4574       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true);
4575     else
4576       return indicatePessimisticFixpoint();
4577 
4578     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4579                                                     : ChangeStatus::CHANGED;
4580   }
4581 
4582   /// Fold __kmpc_parallel_level into a constant if possible.
4583   ChangeStatus foldParallelLevel(Attributor &A) {
4584     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4585 
4586     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4587         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4588 
4589     if (!CallerKernelInfoAA.ParallelLevels.isValidState())
4590       return indicatePessimisticFixpoint();
4591 
4592     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4593       return indicatePessimisticFixpoint();
4594 
4595     if (CallerKernelInfoAA.ReachingKernelEntries.empty()) {
4596       assert(!SimplifiedValue.hasValue() &&
4597              "SimplifiedValue should keep none at this point");
4598       return ChangeStatus::UNCHANGED;
4599     }
4600 
4601     unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0;
4602     unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0;
4603     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4604       auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K),
4605                                           DepClassTy::REQUIRED);
4606       if (!AA.SPMDCompatibilityTracker.isValidState())
4607         return indicatePessimisticFixpoint();
4608 
4609       if (AA.SPMDCompatibilityTracker.isAssumed()) {
4610         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4611           ++KnownSPMDCount;
4612         else
4613           ++AssumedSPMDCount;
4614       } else {
4615         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4616           ++KnownNonSPMDCount;
4617         else
4618           ++AssumedNonSPMDCount;
4619       }
4620     }
4621 
4622     if ((AssumedSPMDCount + KnownSPMDCount) &&
4623         (AssumedNonSPMDCount + KnownNonSPMDCount))
4624       return indicatePessimisticFixpoint();
4625 
4626     auto &Ctx = getAnchorValue().getContext();
4627     // If the caller can only be reached by SPMD kernel entries, the parallel
4628     // level is 1. Similarly, if the caller can only be reached by non-SPMD
4629     // kernel entries, it is 0.
4630     if (AssumedSPMDCount || KnownSPMDCount) {
4631       assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 &&
4632              "Expected only SPMD kernels!");
4633       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1);
4634     } else {
4635       assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 &&
4636              "Expected only non-SPMD kernels!");
4637       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 0);
4638     }
4639     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4640                                                     : ChangeStatus::CHANGED;
4641   }
4642 
4643   ChangeStatus foldKernelFnAttribute(Attributor &A, llvm::StringRef Attr) {
4644     // Specialize only if all the calls agree with the attribute constant value
4645     int32_t CurrentAttrValue = -1;
4646     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4647 
4648     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4649         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4650 
4651     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4652       return indicatePessimisticFixpoint();
4653 
4654     // Iterate over the kernels that reach this function
4655     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4656       int32_t NextAttrVal = -1;
4657       if (K->hasFnAttribute(Attr))
4658         NextAttrVal =
4659             std::stoi(K->getFnAttribute(Attr).getValueAsString().str());
4660 
4661       if (NextAttrVal == -1 ||
4662           (CurrentAttrValue != -1 && CurrentAttrValue != NextAttrVal))
4663         return indicatePessimisticFixpoint();
4664       CurrentAttrValue = NextAttrVal;
4665     }
4666 
4667     if (CurrentAttrValue != -1) {
4668       auto &Ctx = getAnchorValue().getContext();
4669       SimplifiedValue =
4670           ConstantInt::get(Type::getInt32Ty(Ctx), CurrentAttrValue);
4671     }
4672     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4673                                                     : ChangeStatus::CHANGED;
4674   }
4675 
4676   /// An optional value the associated value is assumed to fold to. That is, we
4677   /// assume the associated value (which is a call) can be replaced by this
4678   /// simplified value.
4679   Optional<Value *> SimplifiedValue;
4680 
4681   /// The runtime function kind of the callee of the associated call site.
4682   RuntimeFunction RFKind;
4683 };
4684 
4685 } // namespace
4686 
4687 /// Register folding callsite
4688 void OpenMPOpt::registerFoldRuntimeCall(RuntimeFunction RF) {
4689   auto &RFI = OMPInfoCache.RFIs[RF];
4690   RFI.foreachUse(SCC, [&](Use &U, Function &F) {
4691     CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI);
4692     if (!CI)
4693       return false;
4694     A.getOrCreateAAFor<AAFoldRuntimeCall>(
4695         IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr,
4696         DepClassTy::NONE, /* ForceUpdate */ false,
4697         /* UpdateAfterInit */ false);
4698     return false;
4699   });
4700 }
4701 
4702 void OpenMPOpt::registerAAs(bool IsModulePass) {
4703   if (SCC.empty())
4704 
4705     return;
4706   if (IsModulePass) {
4707     // Ensure we create the AAKernelInfo AAs first and without triggering an
4708     // update. This will make sure we register all value simplification
4709     // callbacks before any other AA has the chance to create an AAValueSimplify
4710     // or similar.
4711     for (Function *Kernel : OMPInfoCache.Kernels)
4712       A.getOrCreateAAFor<AAKernelInfo>(
4713           IRPosition::function(*Kernel), /* QueryingAA */ nullptr,
4714           DepClassTy::NONE, /* ForceUpdate */ false,
4715           /* UpdateAfterInit */ false);
4716 
4717     registerFoldRuntimeCall(OMPRTL___kmpc_is_generic_main_thread_id);
4718     registerFoldRuntimeCall(OMPRTL___kmpc_is_spmd_exec_mode);
4719     registerFoldRuntimeCall(OMPRTL___kmpc_parallel_level);
4720     registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_threads_in_block);
4721     registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_blocks);
4722   }
4723 
4724   // Create CallSite AA for all Getters.
4725   for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) {
4726     auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)];
4727 
4728     auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];
4729 
4730     auto CreateAA = [&](Use &U, Function &Caller) {
4731       CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
4732       if (!CI)
4733         return false;
4734 
4735       auto &CB = cast<CallBase>(*CI);
4736 
4737       IRPosition CBPos = IRPosition::callsite_function(CB);
4738       A.getOrCreateAAFor<AAICVTracker>(CBPos);
4739       return false;
4740     };
4741 
4742     GetterRFI.foreachUse(SCC, CreateAA);
4743   }
4744   auto &GlobalizationRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
4745   auto CreateAA = [&](Use &U, Function &F) {
4746     A.getOrCreateAAFor<AAHeapToShared>(IRPosition::function(F));
4747     return false;
4748   };
4749   if (!DisableOpenMPOptDeglobalization)
4750     GlobalizationRFI.foreachUse(SCC, CreateAA);
4751 
4752   // Create an ExecutionDomain AA for every function and a HeapToStack AA for
4753   // every function if there is a device kernel.
4754   if (!isOpenMPDevice(M))
4755     return;
4756 
4757   for (auto *F : SCC) {
4758     if (F->isDeclaration())
4759       continue;
4760 
4761     A.getOrCreateAAFor<AAExecutionDomain>(IRPosition::function(*F));
4762     if (!DisableOpenMPOptDeglobalization)
4763       A.getOrCreateAAFor<AAHeapToStack>(IRPosition::function(*F));
4764 
4765     for (auto &I : instructions(*F)) {
4766       if (auto *LI = dyn_cast<LoadInst>(&I)) {
4767         bool UsedAssumedInformation = false;
4768         A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr,
4769                                UsedAssumedInformation);
4770       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
4771         A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*SI));
4772       }
4773     }
4774   }
4775 }
4776 
4777 const char AAICVTracker::ID = 0;
4778 const char AAKernelInfo::ID = 0;
4779 const char AAExecutionDomain::ID = 0;
4780 const char AAHeapToShared::ID = 0;
4781 const char AAFoldRuntimeCall::ID = 0;
4782 
4783 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP,
4784                                               Attributor &A) {
4785   AAICVTracker *AA = nullptr;
4786   switch (IRP.getPositionKind()) {
4787   case IRPosition::IRP_INVALID:
4788   case IRPosition::IRP_FLOAT:
4789   case IRPosition::IRP_ARGUMENT:
4790   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4791     llvm_unreachable("ICVTracker can only be created for function position!");
4792   case IRPosition::IRP_RETURNED:
4793     AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A);
4794     break;
4795   case IRPosition::IRP_CALL_SITE_RETURNED:
4796     AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A);
4797     break;
4798   case IRPosition::IRP_CALL_SITE:
4799     AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A);
4800     break;
4801   case IRPosition::IRP_FUNCTION:
4802     AA = new (A.Allocator) AAICVTrackerFunction(IRP, A);
4803     break;
4804   }
4805 
4806   return *AA;
4807 }
4808 
4809 AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP,
4810                                                         Attributor &A) {
4811   AAExecutionDomainFunction *AA = nullptr;
4812   switch (IRP.getPositionKind()) {
4813   case IRPosition::IRP_INVALID:
4814   case IRPosition::IRP_FLOAT:
4815   case IRPosition::IRP_ARGUMENT:
4816   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4817   case IRPosition::IRP_RETURNED:
4818   case IRPosition::IRP_CALL_SITE_RETURNED:
4819   case IRPosition::IRP_CALL_SITE:
4820     llvm_unreachable(
4821         "AAExecutionDomain can only be created for function position!");
4822   case IRPosition::IRP_FUNCTION:
4823     AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A);
4824     break;
4825   }
4826 
4827   return *AA;
4828 }
4829 
4830 AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP,
4831                                                   Attributor &A) {
4832   AAHeapToSharedFunction *AA = nullptr;
4833   switch (IRP.getPositionKind()) {
4834   case IRPosition::IRP_INVALID:
4835   case IRPosition::IRP_FLOAT:
4836   case IRPosition::IRP_ARGUMENT:
4837   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4838   case IRPosition::IRP_RETURNED:
4839   case IRPosition::IRP_CALL_SITE_RETURNED:
4840   case IRPosition::IRP_CALL_SITE:
4841     llvm_unreachable(
4842         "AAHeapToShared can only be created for function position!");
4843   case IRPosition::IRP_FUNCTION:
4844     AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A);
4845     break;
4846   }
4847 
4848   return *AA;
4849 }
4850 
4851 AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP,
4852                                               Attributor &A) {
4853   AAKernelInfo *AA = nullptr;
4854   switch (IRP.getPositionKind()) {
4855   case IRPosition::IRP_INVALID:
4856   case IRPosition::IRP_FLOAT:
4857   case IRPosition::IRP_ARGUMENT:
4858   case IRPosition::IRP_RETURNED:
4859   case IRPosition::IRP_CALL_SITE_RETURNED:
4860   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4861     llvm_unreachable("KernelInfo can only be created for function position!");
4862   case IRPosition::IRP_CALL_SITE:
4863     AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A);
4864     break;
4865   case IRPosition::IRP_FUNCTION:
4866     AA = new (A.Allocator) AAKernelInfoFunction(IRP, A);
4867     break;
4868   }
4869 
4870   return *AA;
4871 }
4872 
4873 AAFoldRuntimeCall &AAFoldRuntimeCall::createForPosition(const IRPosition &IRP,
4874                                                         Attributor &A) {
4875   AAFoldRuntimeCall *AA = nullptr;
4876   switch (IRP.getPositionKind()) {
4877   case IRPosition::IRP_INVALID:
4878   case IRPosition::IRP_FLOAT:
4879   case IRPosition::IRP_ARGUMENT:
4880   case IRPosition::IRP_RETURNED:
4881   case IRPosition::IRP_FUNCTION:
4882   case IRPosition::IRP_CALL_SITE:
4883   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4884     llvm_unreachable("KernelInfo can only be created for call site position!");
4885   case IRPosition::IRP_CALL_SITE_RETURNED:
4886     AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A);
4887     break;
4888   }
4889 
4890   return *AA;
4891 }
4892 
4893 PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) {
4894   if (!containsOpenMP(M))
4895     return PreservedAnalyses::all();
4896   if (DisableOpenMPOptimizations)
4897     return PreservedAnalyses::all();
4898 
4899   FunctionAnalysisManager &FAM =
4900       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4901   KernelSet Kernels = getDeviceKernels(M);
4902 
4903   auto IsCalled = [&](Function &F) {
4904     if (Kernels.contains(&F))
4905       return true;
4906     for (const User *U : F.users())
4907       if (!isa<BlockAddress>(U))
4908         return true;
4909     return false;
4910   };
4911 
4912   auto EmitRemark = [&](Function &F) {
4913     auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
4914     ORE.emit([&]() {
4915       OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F);
4916       return ORA << "Could not internalize function. "
4917                  << "Some optimizations may not be possible. [OMP140]";
4918     });
4919   };
4920 
4921   // Create internal copies of each function if this is a kernel Module. This
4922   // allows iterprocedural passes to see every call edge.
4923   DenseMap<Function *, Function *> InternalizedMap;
4924   if (isOpenMPDevice(M)) {
4925     SmallPtrSet<Function *, 16> InternalizeFns;
4926     for (Function &F : M)
4927       if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F) &&
4928           !DisableInternalization) {
4929         if (Attributor::isInternalizable(F)) {
4930           InternalizeFns.insert(&F);
4931         } else if (!F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::Cold)) {
4932           EmitRemark(F);
4933         }
4934       }
4935 
4936     Attributor::internalizeFunctions(InternalizeFns, InternalizedMap);
4937   }
4938 
4939   // Look at every function in the Module unless it was internalized.
4940   SmallVector<Function *, 16> SCC;
4941   for (Function &F : M)
4942     if (!F.isDeclaration() && !InternalizedMap.lookup(&F))
4943       SCC.push_back(&F);
4944 
4945   if (SCC.empty())
4946     return PreservedAnalyses::all();
4947 
4948   AnalysisGetter AG(FAM);
4949 
4950   auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
4951     return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
4952   };
4953 
4954   BumpPtrAllocator Allocator;
4955   CallGraphUpdater CGUpdater;
4956 
4957   SetVector<Function *> Functions(SCC.begin(), SCC.end());
4958   OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ Functions, Kernels);
4959 
4960   unsigned MaxFixpointIterations =
4961       (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
4962   Attributor A(Functions, InfoCache, CGUpdater, nullptr, true, false,
4963                MaxFixpointIterations, OREGetter, DEBUG_TYPE);
4964 
4965   OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
4966   bool Changed = OMPOpt.run(true);
4967 
4968   // Optionally inline device functions for potentially better performance.
4969   if (AlwaysInlineDeviceFunctions && isOpenMPDevice(M))
4970     for (Function &F : M)
4971       if (!F.isDeclaration() && !Kernels.contains(&F) &&
4972           !F.hasFnAttribute(Attribute::NoInline))
4973         F.addFnAttr(Attribute::AlwaysInline);
4974 
4975   if (PrintModuleAfterOptimizations)
4976     LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt Module Pass:\n" << M);
4977 
4978   if (Changed)
4979     return PreservedAnalyses::none();
4980 
4981   return PreservedAnalyses::all();
4982 }
4983 
4984 PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C,
4985                                           CGSCCAnalysisManager &AM,
4986                                           LazyCallGraph &CG,
4987                                           CGSCCUpdateResult &UR) {
4988   if (!containsOpenMP(*C.begin()->getFunction().getParent()))
4989     return PreservedAnalyses::all();
4990   if (DisableOpenMPOptimizations)
4991     return PreservedAnalyses::all();
4992 
4993   SmallVector<Function *, 16> SCC;
4994   // If there are kernels in the module, we have to run on all SCC's.
4995   for (LazyCallGraph::Node &N : C) {
4996     Function *Fn = &N.getFunction();
4997     SCC.push_back(Fn);
4998   }
4999 
5000   if (SCC.empty())
5001     return PreservedAnalyses::all();
5002 
5003   Module &M = *C.begin()->getFunction().getParent();
5004 
5005   KernelSet Kernels = getDeviceKernels(M);
5006 
5007   FunctionAnalysisManager &FAM =
5008       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
5009 
5010   AnalysisGetter AG(FAM);
5011 
5012   auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
5013     return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
5014   };
5015 
5016   BumpPtrAllocator Allocator;
5017   CallGraphUpdater CGUpdater;
5018   CGUpdater.initialize(CG, C, AM, UR);
5019 
5020   SetVector<Function *> Functions(SCC.begin(), SCC.end());
5021   OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
5022                                 /*CGSCC*/ Functions, Kernels);
5023 
5024   unsigned MaxFixpointIterations =
5025       (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
5026   Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true,
5027                MaxFixpointIterations, OREGetter, DEBUG_TYPE);
5028 
5029   OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
5030   bool Changed = OMPOpt.run(false);
5031 
5032   if (PrintModuleAfterOptimizations)
5033     LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);
5034 
5035   if (Changed)
5036     return PreservedAnalyses::none();
5037 
5038   return PreservedAnalyses::all();
5039 }
5040 
5041 namespace {
5042 
5043 struct OpenMPOptCGSCCLegacyPass : public CallGraphSCCPass {
5044   CallGraphUpdater CGUpdater;
5045   static char ID;
5046 
5047   OpenMPOptCGSCCLegacyPass() : CallGraphSCCPass(ID) {
5048     initializeOpenMPOptCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
5049   }
5050 
5051   void getAnalysisUsage(AnalysisUsage &AU) const override {
5052     CallGraphSCCPass::getAnalysisUsage(AU);
5053   }
5054 
5055   bool runOnSCC(CallGraphSCC &CGSCC) override {
5056     if (!containsOpenMP(CGSCC.getCallGraph().getModule()))
5057       return false;
5058     if (DisableOpenMPOptimizations || skipSCC(CGSCC))
5059       return false;
5060 
5061     SmallVector<Function *, 16> SCC;
5062     // If there are kernels in the module, we have to run on all SCC's.
5063     for (CallGraphNode *CGN : CGSCC) {
5064       Function *Fn = CGN->getFunction();
5065       if (!Fn || Fn->isDeclaration())
5066         continue;
5067       SCC.push_back(Fn);
5068     }
5069 
5070     if (SCC.empty())
5071       return false;
5072 
5073     Module &M = CGSCC.getCallGraph().getModule();
5074     KernelSet Kernels = getDeviceKernels(M);
5075 
5076     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
5077     CGUpdater.initialize(CG, CGSCC);
5078 
5079     // Maintain a map of functions to avoid rebuilding the ORE
5080     DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
5081     auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
5082       std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
5083       if (!ORE)
5084         ORE = std::make_unique<OptimizationRemarkEmitter>(F);
5085       return *ORE;
5086     };
5087 
5088     AnalysisGetter AG;
5089     SetVector<Function *> Functions(SCC.begin(), SCC.end());
5090     BumpPtrAllocator Allocator;
5091     OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG,
5092                                   Allocator,
5093                                   /*CGSCC*/ Functions, Kernels);
5094 
5095     unsigned MaxFixpointIterations =
5096         (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
5097     Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true,
5098                  MaxFixpointIterations, OREGetter, DEBUG_TYPE);
5099 
5100     OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
5101     bool Result = OMPOpt.run(false);
5102 
5103     if (PrintModuleAfterOptimizations)
5104       LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);
5105 
5106     return Result;
5107   }
5108 
5109   bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
5110 };
5111 
5112 } // end anonymous namespace
5113 
5114 KernelSet llvm::omp::getDeviceKernels(Module &M) {
5115   // TODO: Create a more cross-platform way of determining device kernels.
5116   NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
5117   KernelSet Kernels;
5118 
5119   if (!MD)
5120     return Kernels;
5121 
5122   for (auto *Op : MD->operands()) {
5123     if (Op->getNumOperands() < 2)
5124       continue;
5125     MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
5126     if (!KindID || KindID->getString() != "kernel")
5127       continue;
5128 
5129     Function *KernelFn =
5130         mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
5131     if (!KernelFn)
5132       continue;
5133 
5134     ++NumOpenMPTargetRegionKernels;
5135 
5136     Kernels.insert(KernelFn);
5137   }
5138 
5139   return Kernels;
5140 }
5141 
5142 bool llvm::omp::containsOpenMP(Module &M) {
5143   Metadata *MD = M.getModuleFlag("openmp");
5144   if (!MD)
5145     return false;
5146 
5147   return true;
5148 }
5149 
5150 bool llvm::omp::isOpenMPDevice(Module &M) {
5151   Metadata *MD = M.getModuleFlag("openmp-device");
5152   if (!MD)
5153     return false;
5154 
5155   return true;
5156 }
5157 
5158 char OpenMPOptCGSCCLegacyPass::ID = 0;
5159 
5160 INITIALIZE_PASS_BEGIN(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
5161                       "OpenMP specific optimizations", false, false)
5162 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
5163 INITIALIZE_PASS_END(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
5164                     "OpenMP specific optimizations", false, false)
5165 
5166 Pass *llvm::createOpenMPOptCGSCCLegacyPass() {
5167   return new OpenMPOptCGSCCLegacyPass();
5168 }
5169