//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for CUDA code generation targeting the NVIDIA CUDA
// runtime library.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/Cuda.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/Support/Format.h"

using namespace clang;
using namespace CodeGen;

namespace {
constexpr unsigned CudaFatMagic = 0x466243b1;
constexpr unsigned HIPFatMagic = 0x48495046; // "HIPF"

class CGNVCUDARuntime : public CGCUDARuntime {

private:
  llvm::IntegerType *IntTy, *SizeTy;
  llvm::Type *VoidTy;
  llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;

  /// Convenience reference to LLVM Context
  llvm::LLVMContext &Context;
  /// Convenience reference to the current module
  llvm::Module &TheModule;
  /// Keeps track of kernel launch stubs and handles emitted in this module
  struct KernelInfo {
    llvm::Function *Kernel; // stub function to help launch kernel
    const Decl *D;
  };
  llvm::SmallVector<KernelInfo, 16> EmittedKernels;
  // Map a kernel's mangled name to a symbol that identifies the kernel in host
  // code. For CUDA, the symbol identifying the kernel is the same as the
  // device stub function. For HIP, the two are different.
  llvm::DenseMap<StringRef, llvm::GlobalValue *> KernelHandles;
  // Map a kernel handle to the kernel stub.
  llvm::DenseMap<llvm::GlobalValue *, llvm::Function *> KernelStubs;
  struct VarInfo {
    llvm::GlobalVariable *Var;
    const VarDecl *D;
    DeviceVarFlags Flags;
  };
  llvm::SmallVector<VarInfo, 16> DeviceVars;
  /// Keeps track of the variable containing the handle of the GPU binary.
  /// Populated by ModuleCtorFunction() and used to create corresponding
  /// cleanup calls in ModuleDtorFunction().
  llvm::GlobalVariable *GpuBinaryHandle = nullptr;
  /// Whether we generate relocatable device code.
  bool RelocatableDeviceCode;
  /// Mangle context for device.
  std::unique_ptr<MangleContext> DeviceMC;
  /// Some zeros used for GEPs.
  llvm::Constant *Zeros[2];

  llvm::FunctionCallee getSetupArgumentFn() const;
  llvm::FunctionCallee getLaunchFn() const;

  llvm::FunctionType *getRegisterGlobalsFnTy() const;
  llvm::FunctionType *getCallbackFnTy() const;
  llvm::FunctionType *getRegisterLinkedBinaryFnTy() const;
  std::string addPrefixToName(StringRef FuncName) const;
  std::string addUnderscoredPrefixToName(StringRef FuncName) const;

  /// Creates a function to register all kernel stubs generated in this module.
  llvm::Function *makeRegisterGlobalsFn();

  /// Helper function that generates a constant string and returns a pointer to
  /// the start of the string.  The result of this function can be used
  /// anywhere the C code specifies a const char*.
  llvm::Constant *makeConstantString(const std::string &Str,
                                     const std::string &Name = "") {
    auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
    return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
                                                ConstStr.getPointer(), Zeros);
  }

  /// Helper function that generates an initialized constant array from Str,
  /// and optionally sets the section name and alignment. AddNull specifies
  /// whether the array should have NUL termination.
  llvm::Constant *makeConstantArray(StringRef Str,
                                    StringRef Name = "",
                                    StringRef SectionName = "",
                                    unsigned Alignment = 0,
                                    bool AddNull = false) {
    llvm::Constant *Value =
        llvm::ConstantDataArray::getString(Context, Str, AddNull);
    auto *GV = new llvm::GlobalVariable(
        TheModule, Value->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Value, Name);
    if (!SectionName.empty()) {
      GV->setSection(SectionName);
      // Mark the address as used to make sure the section isn't merged away
      // and really ends up in the object file.
      GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::None);
    }
    if (Alignment)
      GV->setAlignment(llvm::Align(Alignment));
    return llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
  }

  /// Helper function that generates an empty dummy function returning void.
  llvm::Function *makeDummyFunction(llvm::FunctionType *FnTy) {
    assert(FnTy->getReturnType()->isVoidTy() &&
           "Can only generate dummy functions returning void!");
    llvm::Function *DummyFunc = llvm::Function::Create(
        FnTy, llvm::GlobalValue::InternalLinkage, "dummy", &TheModule);

    llvm::BasicBlock *DummyBlock =
        llvm::BasicBlock::Create(Context, "", DummyFunc);
    CGBuilderTy FuncBuilder(CGM, Context);
    FuncBuilder.SetInsertPoint(DummyBlock);
    FuncBuilder.CreateRetVoid();

    return DummyFunc;
  }

  void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
  void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
  std::string getDeviceSideName(const NamedDecl *ND) override;

  void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
                         bool Extern, bool Constant) {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Variable, Extern, Constant,
                           VD->hasAttr<HIPManagedAttr>(),
                           /*Normalized*/ false, 0}});
  }
  void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
                          bool Extern, int Type) {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Surface, Extern, /*Constant*/ false,
                           /*Managed*/ false,
                           /*Normalized*/ false, Type}});
  }
  void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
                         bool Extern, int Type, bool Normalized) {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Texture, Extern, /*Constant*/ false,
                           /*Managed*/ false, Normalized, Type}});
  }

  /// Creates the module constructor function.
  llvm::Function *makeModuleCtorFunction();
  /// Creates the module destructor function.
  llvm::Function *makeModuleDtorFunction();
  /// Transform managed variables for device compilation.
  void transformManagedVars();
  /// Create offloading entries to register globals in RDC mode.
  void createOffloadingEntries();

public:
  CGNVCUDARuntime(CodeGenModule &CGM);

  llvm::GlobalValue *getKernelHandle(llvm::Function *F, GlobalDecl GD) override;
  llvm::Function *getKernelStub(llvm::GlobalValue *Handle) override {
    auto Loc = KernelStubs.find(Handle);
    assert(Loc != KernelStubs.end());
    return Loc->second;
  }
  void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
  void handleVarRegistration(const VarDecl *VD,
                             llvm::GlobalVariable &Var) override;
  void
  internalizeDeviceSideVar(const VarDecl *D,
                           llvm::GlobalValue::LinkageTypes &Linkage) override;

  llvm::Function *finalizeModule() override;
};

} // end anonymous namespace

std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("hip") + Twine(FuncName)).str());
  return ((Twine("cuda") + Twine(FuncName)).str());
}
std::string
CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("__hip") + Twine(FuncName)).str());
  return ((Twine("__cuda") + Twine(FuncName)).str());
}

static std::unique_ptr<MangleContext> InitDeviceMC(CodeGenModule &CGM) {
  // If the host and device have different C++ ABIs, create a device mangle
  // context so that mangling retrieves the additional device-side lambda
  // mangling number instead of the regular host one.
  if (CGM.getContext().getAuxTargetInfo() &&
      CGM.getContext().getTargetInfo().getCXXABI().isMicrosoft() &&
      CGM.getContext().getAuxTargetInfo()->getCXXABI().isItaniumFamily()) {
    return std::unique_ptr<MangleContext>(
        CGM.getContext().createDeviceMangleContext(
            *CGM.getContext().getAuxTargetInfo()));
  }

  return std::unique_ptr<MangleContext>(CGM.getContext().createMangleContext(
      CGM.getContext().getAuxTargetInfo()));
}

CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
    : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
      TheModule(CGM.getModule()),
      RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
      DeviceMC(InitDeviceMC(CGM)) {
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  IntTy = CGM.IntTy;
  SizeTy = CGM.SizeTy;
  VoidTy = CGM.VoidTy;
  Zeros[0] = llvm::ConstantInt::get(SizeTy, 0);
  Zeros[1] = Zeros[0];

  CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
  VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
  VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}

llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
  // cudaError_t cudaSetupArgument(void *, size_t, size_t)
  llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
  return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, Params, false),
      addPrefixToName("SetupArgument"));
}

llvm::FunctionCallee CGNVCUDARuntime::getLaunchFn() const {
  if (CGM.getLangOpts().HIP) {
    // hipError_t hipLaunchByPtr(char *);
    return CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
  }
  // cudaError_t cudaLaunch(char *);
  return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterGlobalsFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getCallbackFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
  auto *CallbackFnTy = getCallbackFnTy();
  auto *RegisterGlobalsFnTy = getRegisterGlobalsFnTy();
  llvm::Type *Params[] = {RegisterGlobalsFnTy->getPointerTo(), VoidPtrTy,
                          VoidPtrTy, CallbackFnTy->getPointerTo()};
  return llvm::FunctionType::get(VoidTy, Params, false);
}

std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
  GlobalDecl GD;
  // ND could be either a kernel or a variable.
  if (auto *FD = dyn_cast<FunctionDecl>(ND))
    GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
  else
    GD = GlobalDecl(ND);
  std::string DeviceSideName;
  MangleContext *MC;
  if (CGM.getLangOpts().CUDAIsDevice)
    MC = &CGM.getCXXABI().getMangleContext();
  else
    MC = DeviceMC.get();
  if (MC->shouldMangleDeclName(ND)) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    MC->mangleName(GD, Out);
    DeviceSideName = std::string(Out.str());
  } else
    DeviceSideName = std::string(ND->getIdentifier()->getName());

  // Make a unique name for the device-side static file-scope variable for HIP.
  if (CGM.getContext().shouldExternalize(ND) &&
      CGM.getLangOpts().GPURelocatableDeviceCode) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    Out << DeviceSideName;
    CGM.printPostfixForExternalizedDecl(Out, ND);
    DeviceSideName = std::string(Out.str());
  }
  return DeviceSideName;
}

void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
                                     FunctionArgList &Args) {
  EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
  if (auto *GV =
          dyn_cast<llvm::GlobalVariable>(KernelHandles[CGF.CurFn->getName()])) {
    GV->setLinkage(CGF.CurFn->getLinkage());
    GV->setInitializer(CGF.CurFn);
  }
  if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH) ||
      (CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI))
    emitDeviceStubBodyNew(CGF, Args);
  else
    emitDeviceStubBodyLegacy(CGF, Args);
}

// CUDA 9.0+ uses a new way to launch kernels. Parameters are packed in a local
// array and kernels are launched using cudaLaunchKernel().
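// Conceptually, for a kernel `__global__ void k(int a, float b)` the stub body
// emitted here is roughly (a sketch; names are illustrative):
// \code
// void __device_stub__k(int a, float b) {
//   void *kernel_args[2] = {&a, &b};
//   dim3 grid_dim, block_dim;
//   size_t shmem_size;
//   cudaStream_t stream;
//   __cudaPopCallConfiguration(&grid_dim, &block_dim, &shmem_size, &stream);
//   cudaLaunchKernel((const void *)handle, grid_dim, block_dim, kernel_args,
//                    shmem_size, stream);
// }
// \endcode
// where `handle` is the stub itself for CUDA and the kernel handle variable
// for HIP.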
void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
                                            FunctionArgList &Args) {
  // Build the shadow stack entry at the very start of the function.

  // Calculate the amount of space we will need for all arguments.  If we have
  // no args, allocate a single pointer so we still have a valid pointer to the
  // argument array that we can pass to the runtime, even if it will be unused.
  Address KernelArgs = CGF.CreateTempAlloca(
      VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
      llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
  // Store pointers to the arguments in a locally allocated launch_args.
  for (unsigned i = 0; i < Args.size(); ++i) {
    llvm::Value *VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
    llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
    CGF.Builder.CreateDefaultAlignedStore(
        VoidVarPtr,
        CGF.Builder.CreateConstGEP1_32(VoidPtrTy, KernelArgs.getPointer(), i));
  }

  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");

  // Look up the cudaLaunchKernel/hipLaunchKernel function.
  // The HIP kernel-launch API name depends on the -fgpu-default-stream
  // option. For the default value 'legacy', it is hipLaunchKernel. For
  // 'per-thread', it is hipLaunchKernel_spt.
  // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
  //                              void **args, size_t sharedMem,
  //                              cudaStream_t stream);
  // hipError_t hipLaunchKernel[_spt](const void *func, dim3 gridDim,
  //                                  dim3 blockDim, void **args,
  //                                  size_t sharedMem, hipStream_t stream);
  TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
  std::string KernelLaunchAPI = "LaunchKernel";
  if (CGF.getLangOpts().HIP && CGF.getLangOpts().GPUDefaultStream ==
                                   LangOptions::GPUDefaultStreamKind::PerThread)
    KernelLaunchAPI = KernelLaunchAPI + "_spt";
  auto LaunchKernelName = addPrefixToName(KernelLaunchAPI);
  IdentifierInfo &cudaLaunchKernelII =
      CGM.getContext().Idents.get(LaunchKernelName);
  FunctionDecl *cudaLaunchKernelFD = nullptr;
  for (auto *Result : DC->lookup(&cudaLaunchKernelII)) {
    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
      cudaLaunchKernelFD = FD;
  }

  if (cudaLaunchKernelFD == nullptr) {
    CGM.Error(CGF.CurFuncDecl->getLocation(),
              "Can't find declaration for " + LaunchKernelName);
    return;
  }
  // Create temporary dim3 grid_dim, block_dim.
  ParmVarDecl *GridDimParam = cudaLaunchKernelFD->getParamDecl(1);
  QualType Dim3Ty = GridDimParam->getType();
  Address GridDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "grid_dim");
  Address BlockDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
  Address ShmemSize =
      CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
  Address Stream =
      CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
  llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy,
                              {/*gridDim=*/GridDim.getType(),
                               /*blockDim=*/BlockDim.getType(),
                               /*ShmemSize=*/ShmemSize.getType(),
                               /*Stream=*/Stream.getType()},
                              /*isVarArg=*/false),
      addUnderscoredPrefixToName("PopCallConfiguration"));

  CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
                              {GridDim.getPointer(), BlockDim.getPointer(),
                               ShmemSize.getPointer(), Stream.getPointer()});

  // Emit the call to cudaLaunchKernel.
  llvm::Value *Kernel = CGF.Builder.CreatePointerCast(
      KernelHandles[CGF.CurFn->getName()], VoidPtrTy);
  CallArgList LaunchKernelArgs;
  LaunchKernelArgs.add(RValue::get(Kernel),
                       cudaLaunchKernelFD->getParamDecl(0)->getType());
  LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
                       cudaLaunchKernelFD->getParamDecl(3)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
                       cudaLaunchKernelFD->getParamDecl(4)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(Stream)),
                       cudaLaunchKernelFD->getParamDecl(5)->getType());

  QualType QT = cudaLaunchKernelFD->getType();
  QualType CQT = QT.getCanonicalType();
  llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
  llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
  llvm::FunctionCallee cudaLaunchKernelFn =
      CGM.CreateRuntimeFunction(FTy, LaunchKernelName);
  CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
               LaunchKernelArgs);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

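// The legacy (pre-CUDA-9.0) launch path pushes each argument onto the
// runtime's argument stack with cudaSetupArgument() and then launches by stub
// address. Roughly (a sketch; names are illustrative, and a failed setup call
// skips the launch):
// \code
// void __device_stub__k(int a, float b) {
//   if (cudaSetupArgument(&a, sizeof(a), /*offset=*/0) == cudaSuccess)
//     if (cudaSetupArgument(&b, sizeof(b), /*offset=*/offsetof_b) ==
//         cudaSuccess)
//       cudaLaunch((char *)handle);
// }
// \endcode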
void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
                                               FunctionArgList &Args) {
  // Emit a call to cudaSetupArgument for each arg in Args.
  llvm::FunctionCallee cudaSetupArgFn = getSetupArgumentFn();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
  CharUnits Offset = CharUnits::Zero();
  for (const VarDecl *A : Args) {
    auto TInfo = CGM.getContext().getTypeInfoInChars(A->getType());
    Offset = Offset.alignTo(TInfo.Align);
    llvm::Value *Args[] = {
        CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
                                      VoidPtrTy),
        llvm::ConstantInt::get(SizeTy, TInfo.Width.getQuantity()),
        llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
    };
    llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
    llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
    llvm::Value *CBZero = CGF.Builder.CreateICmpEQ(CB, Zero);
    llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
    CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
    CGF.EmitBlock(NextBlock);
    Offset += TInfo.Width;
  }

  // Emit the call to cudaLaunch
  llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
  llvm::Value *Arg = CGF.Builder.CreatePointerCast(
      KernelHandles[CGF.CurFn->getName()], CharPtrTy);
  CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

// Replace the original variable Var with the address loaded from the variable
// ManagedVar populated by the HIP runtime.
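// For example (a sketch; shown after the caller has also renamed the
// variables), a use of the managed variable @x such as
// \code
//   %v = load i32, ptr @x
// \endcode
// becomes a load through the runtime-populated pointer, with the original
// definition living on as @x.managed:
// \code
//   %ld.managed = load ptr, ptr @x
//   %v = load i32, ptr %ld.managed
// \endcode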
static void replaceManagedVar(llvm::GlobalVariable *Var,
                              llvm::GlobalVariable *ManagedVar) {
  SmallVector<SmallVector<llvm::User *, 8>, 8> WorkList;
  for (auto &&VarUse : Var->uses()) {
    WorkList.push_back({VarUse.getUser()});
  }
  while (!WorkList.empty()) {
    auto &&WorkItem = WorkList.pop_back_val();
    auto *U = WorkItem.back();
    if (isa<llvm::ConstantExpr>(U)) {
      for (auto &&UU : U->uses()) {
        WorkItem.push_back(UU.getUser());
        WorkList.push_back(WorkItem);
        WorkItem.pop_back();
      }
      continue;
    }
    if (auto *I = dyn_cast<llvm::Instruction>(U)) {
      llvm::Value *OldV = Var;
      llvm::Instruction *NewV =
          new llvm::LoadInst(Var->getType(), ManagedVar, "ld.managed", false,
                             llvm::Align(Var->getAlignment()), I);
      WorkItem.pop_back();
      // Replace constant expressions directly or indirectly using the managed
      // variable with instructions.
      for (auto &&Op : WorkItem) {
        auto *CE = cast<llvm::ConstantExpr>(Op);
        auto *NewInst = CE->getAsInstruction(I);
        NewInst->replaceUsesOfWith(OldV, NewV);
        OldV = CE;
        NewV = NewInst;
      }
      I->replaceUsesOfWith(OldV, NewV);
    } else {
      llvm_unreachable("Invalid use of managed variable");
    }
  }
}

/// Creates a function that sets up state on the host side for CUDA objects
/// that have a presence on both the host and device sides. Specifically,
/// registers the host side of kernel functions and device global variables
/// with the CUDA runtime.
/// \code
/// void __cuda_register_globals(void** GpuBinaryHandle) {
///    __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...);
///    ...
///    __cudaRegisterFunction(GpuBinaryHandle,KernelM,...);
///    __cudaRegisterVar(GpuBinaryHandle, GlobalVar0, ...);
///    ...
///    __cudaRegisterVar(GpuBinaryHandle, GlobalVarN, ...);
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
  // No need to register anything.
  if (EmittedKernels.empty() && DeviceVars.empty())
    return nullptr;

  llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
      getRegisterGlobalsFnTy(), llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_register_globals"), &TheModule);
  llvm::BasicBlock *EntryBB =
      llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
  CGBuilderTy Builder(CGM, Context);
  Builder.SetInsertPoint(EntryBB);

  // void __cudaRegisterFunction(void **, const char *, char *, const char *,
  //                             int, uint3*, uint3*, dim3*, dim3*, int*)
  llvm::Type *RegisterFuncParams[] = {
      VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
      VoidPtrTy,    VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
  llvm::FunctionCallee RegisterFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
      addUnderscoredPrefixToName("RegisterFunction"));

  // Extract the GpuBinaryHandle passed as the first argument to
  // __cuda_register_globals() and generate a __cudaRegisterFunction() call for
  // each emitted kernel.
  llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
  for (auto &&I : EmittedKernels) {
    llvm::Constant *KernelName =
        makeConstantString(getDeviceSideName(cast<NamedDecl>(I.D)));
    llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
    llvm::Value *Args[] = {
        &GpuBinaryHandlePtr,
        Builder.CreateBitCast(KernelHandles[I.Kernel->getName()], VoidPtrTy),
        KernelName,
        KernelName,
        llvm::ConstantInt::get(IntTy, -1),
        NullPtr,
        NullPtr,
        NullPtr,
        NullPtr,
        llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
    Builder.CreateCall(RegisterFunc, Args);
  }

  llvm::Type *VarSizeTy = IntTy;
  // For HIP or CUDA 9.0+, the size of a device variable has type `size_t`.
  if (CGM.getLangOpts().HIP ||
      ToCudaVersion(CGM.getTarget().getSDKVersion()) >= CudaVersion::CUDA_90)
    VarSizeTy = SizeTy;

  // void __cudaRegisterVar(void **, char *, char *, const char *,
  //                        int, {int|size_t}, int, int)
  llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
                                     CharPtrTy,    IntTy,     VarSizeTy,
                                     IntTy,        IntTy};
  llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, RegisterVarParams, false),
      addUnderscoredPrefixToName("RegisterVar"));
  // void __hipRegisterManagedVar(void **, char *, char *, const char *,
  //                              size_t, unsigned)
  llvm::Type *RegisterManagedVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
                                            CharPtrTy,    VarSizeTy, IntTy};
  llvm::FunctionCallee RegisterManagedVar = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, RegisterManagedVarParams, false),
      addUnderscoredPrefixToName("RegisterManagedVar"));
  // void __cudaRegisterSurface(void **, const struct surfaceReference *,
  //                            const void **, const char *, int, int);
  llvm::FunctionCallee RegisterSurf = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(
          VoidTy, {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy},
          false),
      addUnderscoredPrefixToName("RegisterSurface"));
  // void __cudaRegisterTexture(void **, const struct textureReference *,
  //                            const void **, const char *, int, int, int)
  llvm::FunctionCallee RegisterTex = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(
          VoidTy,
          {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy, IntTy},
          false),
      addUnderscoredPrefixToName("RegisterTexture"));
  for (auto &&Info : DeviceVars) {
    llvm::GlobalVariable *Var = Info.Var;
    assert((!Var->isDeclaration() || Info.Flags.isManaged()) &&
           "External variables should not show up here, except HIP managed "
           "variables");
    llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
    switch (Info.Flags.getKind()) {
    case DeviceVarFlags::Variable: {
      uint64_t VarSize =
          CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
      if (Info.Flags.isManaged()) {
        auto *ManagedVar = new llvm::GlobalVariable(
            CGM.getModule(), Var->getType(),
            /*isConstant=*/false, Var->getLinkage(),
            /*Init=*/Var->isDeclaration()
                ? nullptr
                : llvm::ConstantPointerNull::get(Var->getType()),
            /*Name=*/"", /*InsertBefore=*/nullptr,
            llvm::GlobalVariable::NotThreadLocal);
        ManagedVar->setDSOLocal(Var->isDSOLocal());
        ManagedVar->setVisibility(Var->getVisibility());
        ManagedVar->setExternallyInitialized(true);
        ManagedVar->takeName(Var);
        Var->setName(Twine(ManagedVar->getName() + ".managed"));
        replaceManagedVar(Var, ManagedVar);
        llvm::Value *Args[] = {
            &GpuBinaryHandlePtr,
            Builder.CreateBitCast(ManagedVar, VoidPtrTy),
            Builder.CreateBitCast(Var, VoidPtrTy),
            VarName,
            llvm::ConstantInt::get(VarSizeTy, VarSize),
            llvm::ConstantInt::get(IntTy, Var->getAlignment())};
        if (!Var->isDeclaration())
          Builder.CreateCall(RegisterManagedVar, Args);
      } else {
        llvm::Value *Args[] = {
            &GpuBinaryHandlePtr,
            Builder.CreateBitCast(Var, VoidPtrTy),
            VarName,
            VarName,
            llvm::ConstantInt::get(IntTy, Info.Flags.isExtern()),
            llvm::ConstantInt::get(VarSizeTy, VarSize),
            llvm::ConstantInt::get(IntTy, Info.Flags.isConstant()),
            llvm::ConstantInt::get(IntTy, 0)};
        Builder.CreateCall(RegisterVar, Args);
      }
      break;
    }
    case DeviceVarFlags::Surface:
      Builder.CreateCall(
          RegisterSurf,
          {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
           VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
      break;
    case DeviceVarFlags::Texture:
      Builder.CreateCall(
          RegisterTex,
          {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
           VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isNormalized()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
      break;
    }
  }

  Builder.CreateRetVoid();
  return RegisterKernelsFunc;
}

/// Creates a global constructor function for the module:
///
/// For CUDA:
/// \code
/// void __cuda_module_ctor() {
///     Handle = __cudaRegisterFatBinary(GpuBinaryBlob);
///     __cuda_register_globals(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_ctor() {
///     if (__hip_gpubin_handle == 0) {
///         __hip_gpubin_handle = __hipRegisterFatBinary(GpuBinaryBlob);
///         __hip_register_globals(__hip_gpubin_handle);
///     }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
  bool IsHIP = CGM.getLangOpts().HIP;
  bool IsCUDA = CGM.getLangOpts().CUDA;
  // No need to generate ctors/dtors if there is no GPU binary.
  StringRef CudaGpuBinaryFileName = CGM.getCodeGenOpts().CudaGpuBinaryFileName;
  if (CudaGpuBinaryFileName.empty() && !IsHIP)
    return nullptr;
  if ((IsHIP || (IsCUDA && !RelocatableDeviceCode)) && EmittedKernels.empty() &&
      DeviceVars.empty())
    return nullptr;

  // void __{cuda|hip}_register_globals(void* handle);
  llvm::Function *RegisterGlobalsFunc = makeRegisterGlobalsFn();
  // We always need a function to pass in as a callback. Create a dummy
  // implementation if we don't need to register anything.
  if (RelocatableDeviceCode && !RegisterGlobalsFunc)
    RegisterGlobalsFunc = makeDummyFunction(getRegisterGlobalsFnTy());

  // void ** __{cuda|hip}RegisterFatBinary(void *);
  llvm::FunctionCallee RegisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
      addUnderscoredPrefixToName("RegisterFatBinary"));
  // struct { int magic; int version; void *gpu_binary; void *dont_care; };
  llvm::StructType *FatbinWrapperTy =
      llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);

  // Register the GPU binary with the CUDA runtime, store the returned handle
  // in a global variable and save a reference in GpuBinaryHandle to be cleaned
  // up in the destructor on exit. Then associate all known kernels with the
  // GPU binary handle so the CUDA runtime can figure out what to call on the
  // GPU side.
  std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
  if (!CudaGpuBinaryFileName.empty()) {
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
        llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
    if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
      CGM.getDiags().Report(diag::err_cannot_open_file)
          << CudaGpuBinaryFileName << EC.message();
      return nullptr;
    }
    CudaGpuBinary = std::move(CudaGpuBinaryOrErr.get());
  }

  llvm::Function *ModuleCtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_ctor"), &TheModule);
  llvm::BasicBlock *CtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
  CGBuilderTy CtorBuilder(CGM, Context);

  CtorBuilder.SetInsertPoint(CtorEntryBB);

  const char *FatbinConstantName;
  const char *FatbinSectionName;
  const char *ModuleIDSectionName;
  StringRef ModuleIDPrefix;
  llvm::Constant *FatBinStr;
  unsigned FatMagic;
  if (IsHIP) {
    FatbinConstantName = ".hip_fatbin";
    FatbinSectionName = ".hipFatBinSegment";

    ModuleIDSectionName = "__hip_module_id";
    ModuleIDPrefix = "__hip_";

    if (CudaGpuBinary) {
      // If the fatbin is available from early finalization, create a string
      // literal containing the fat binary loaded from the given file.
      const unsigned HIPCodeObjectAlign = 4096;
      FatBinStr = makeConstantArray(std::string(CudaGpuBinary->getBuffer()), "",
                                    FatbinConstantName, HIPCodeObjectAlign);
    } else {
      // If the fatbin is not available, create an external symbol
      // __hip_fatbin in section .hip_fatbin. The external symbol is supposed
      // to contain the fat binary but will be populated somewhere else,
      // e.g. by lld through a linker script.
      FatBinStr = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty,
          /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
          "__hip_fatbin", nullptr,
          llvm::GlobalVariable::NotThreadLocal);
      cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
    }

    FatMagic = HIPFatMagic;
  } else {
    if (RelocatableDeviceCode)
      FatbinConstantName = CGM.getTriple().isMacOSX()
                               ? "__NV_CUDA,__nv_relfatbin"
                               : "__nv_relfatbin";
    else
      FatbinConstantName =
          CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
    // NVIDIA's cuobjdump looks for fatbins in this section.
    FatbinSectionName =
        CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";

    ModuleIDSectionName = CGM.getTriple().isMacOSX()
                              ? "__NV_CUDA,__nv_module_id"
                              : "__nv_module_id";
    ModuleIDPrefix = "__nv_";

    // For CUDA, create a string literal containing the fat binary loaded from
    // the given file.
    FatBinStr = makeConstantArray(std::string(CudaGpuBinary->getBuffer()), "",
                                  FatbinConstantName, 8);
    FatMagic = CudaFatMagic;
  }

  // Create an initialized wrapper structure that points to the loaded GPU
  // binary.
  ConstantInitBuilder Builder(CGM);
  auto Values = Builder.beginStruct(FatbinWrapperTy);
  // Fatbin wrapper magic.
  Values.addInt(IntTy, FatMagic);
  // Fatbin version.
  Values.addInt(IntTy, 1);
  // Data.
  Values.add(FatBinStr);
  // Unused in fatbin v1.
  Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
  llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
      addUnderscoredPrefixToName("_fatbin_wrapper"), CGM.getPointerAlign(),
      /*constant*/ true);
  FatbinWrapper->setSection(FatbinSectionName);
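
  // A sketch of the resulting global for non-RDC CUDA (the name of the
  // referenced fatbin data is illustrative; 0x466243b1 is CudaFatMagic):
  // \code
  //   @__cuda_fatbin_wrapper = constant { i32, i32, ptr, ptr }
  //       { i32 1180844977, i32 1, ptr @fatbin, ptr null },
  //       section ".nvFatBinSegment"
  // \endcode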

  // There is only one HIP fat binary per linked module, however there are
  // multiple constructor functions. Make sure the fat binary is registered
  // only once. The constructor functions are executed by the dynamic loader
  // before the program gains control. The dynamic loader cannot execute the
  // constructor functions concurrently since doing that would not guarantee
  // thread safety of the loaded program. Therefore we can assume sequential
  // execution of constructor functions here.
  if (IsHIP) {
    auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage
                                 : llvm::GlobalValue::LinkOnceAnyLinkage;
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleCtorFunc);
    // The name, size, and initialization pattern of this variable are part
    // of the HIP ABI.
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, /*isConstant=*/false,
        Linkage,
        /*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
        "__hip_gpubin_handle");
    if (Linkage == llvm::GlobalValue::LinkOnceAnyLinkage)
      GpuBinaryHandle->setComdat(
          CGM.getModule().getOrInsertComdat(GpuBinaryHandle->getName()));
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
    // Prevent weak symbols in different shared libraries from being merged.
    if (Linkage != llvm::GlobalValue::InternalLinkage)
      GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
    Address GpuBinaryAddr(
        GpuBinaryHandle, VoidPtrPtrTy,
        CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
    {
      auto *HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
      llvm::Constant *Zero =
          llvm::Constant::getNullValue(HandleValue->getType());
      llvm::Value *EQZero = CtorBuilder.CreateICmpEQ(HandleValue, Zero);
      CtorBuilder.CreateCondBr(EQZero, IfBlock, ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(IfBlock);
      // GpuBinaryHandle = __hipRegisterFatBinary(&FatbinWrapper);
      llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
          RegisterFatbinFunc,
          CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
      CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryAddr);
      CtorBuilder.CreateBr(ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(ExitBlock);
      // Call __hip_register_globals(GpuBinaryHandle);
      if (RegisterGlobalsFunc) {
        auto *HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
        CtorBuilder.CreateCall(RegisterGlobalsFunc, HandleValue);
      }
    }
  } else if (!RelocatableDeviceCode) {
    // Register the binary with the CUDA runtime. This is substantially
    // different in default mode vs. separate compilation!
    // GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
    llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
        RegisterFatbinFunc,
        CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
    CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
                                   CGM.getPointerAlign());

    // Call __cuda_register_globals(GpuBinaryHandle);
    if (RegisterGlobalsFunc)
      CtorBuilder.CreateCall(RegisterGlobalsFunc, RegisterFatbinCall);

    // Call __cudaRegisterFatBinaryEnd(Handle) if this CUDA version needs it.
    if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                           CudaFeature::CUDA_USES_FATBIN_REGISTER_END)) {
      // void __cudaRegisterFatBinaryEnd(void **);
      llvm::FunctionCallee RegisterFatbinEndFunc = CGM.CreateRuntimeFunction(
          llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
          "__cudaRegisterFatBinaryEnd");
      CtorBuilder.CreateCall(RegisterFatbinEndFunc, RegisterFatbinCall);
    }
  } else {
    // Generate a unique module ID.
    SmallString<64> ModuleID;
    llvm::raw_svector_ostream OS(ModuleID);
    OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
    llvm::Constant *ModuleIDConstant = makeConstantArray(
        std::string(ModuleID.str()), "", ModuleIDSectionName, 32,
        /*AddNull=*/true);

    // Create an alias for the FatbinWrapper that nvcc will look for.
    llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
                              Twine("__fatbinwrap") + ModuleID, FatbinWrapper);

    // void __cudaRegisterLinkedBinary%ModuleID%(void (*)(void *), void *,
    // void *, void (*)(void **))
    SmallString<128> RegisterLinkedBinaryName("__cudaRegisterLinkedBinary");
    RegisterLinkedBinaryName += ModuleID;
    llvm::FunctionCallee RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
        getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);

    assert(RegisterGlobalsFunc && "Expecting at least dummy function!");
    llvm::Value *Args[] = {RegisterGlobalsFunc,
                           CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy),
                           ModuleIDConstant,
                           makeDummyFunction(getCallbackFnTy())};
    CtorBuilder.CreateCall(RegisterLinkedBinaryFunc, Args);
  }

  // Create the destructor and register it with atexit() the way NVCC does it.
  // Doing it during the regular destructor phase worked in CUDA before 9.2,
  // but results in a double-free in 9.2.
  if (llvm::Function *CleanupFn = makeModuleDtorFunction()) {
    // extern "C" int atexit(void (*f)(void));
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(IntTy, CleanupFn->getType(), false);
    llvm::FunctionCallee AtExitFunc =
        CGM.CreateRuntimeFunction(AtExitTy, "atexit", llvm::AttributeList(),
                                  /*Local=*/true);
    CtorBuilder.CreateCall(AtExitFunc, CleanupFn);
  }

  CtorBuilder.CreateRetVoid();
  return ModuleCtorFunc;
}

/// Creates a global destructor function that unregisters the GPU code blob
/// registered by the constructor.
///
/// For CUDA:
/// \code
/// void __cuda_module_dtor() {
///     __cudaUnregisterFatBinary(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_dtor() {
///     if (__hip_gpubin_handle) {
///         __hipUnregisterFatBinary(__hip_gpubin_handle);
///         __hip_gpubin_handle = 0;
///     }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
  // No need for a destructor if we don't have a handle to unregister.
  if (!GpuBinaryHandle)
    return nullptr;

  // void __cudaUnregisterFatBinary(void ** handle);
  llvm::FunctionCallee UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
      addUnderscoredPrefixToName("UnregisterFatBinary"));

  llvm::Function *ModuleDtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_dtor"), &TheModule);

  llvm::BasicBlock *DtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
  CGBuilderTy DtorBuilder(CGM, Context);
  DtorBuilder.SetInsertPoint(DtorEntryBB);

  Address GpuBinaryAddr(
      GpuBinaryHandle, GpuBinaryHandle->getValueType(),
      CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
  auto *HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
  // There is only one HIP fat binary per linked module, however there are
  // multiple destructor functions. Make sure the fat binary is unregistered
  // only once.
  if (CGM.getLangOpts().HIP) {
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleDtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleDtorFunc);
    llvm::Constant *Zero = llvm::Constant::getNullValue(HandleValue->getType());
    llvm::Value *NEZero = DtorBuilder.CreateICmpNE(HandleValue, Zero);
    DtorBuilder.CreateCondBr(NEZero, IfBlock, ExitBlock);

    DtorBuilder.SetInsertPoint(IfBlock);
    DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
    DtorBuilder.CreateStore(Zero, GpuBinaryAddr);
    DtorBuilder.CreateBr(ExitBlock);

    DtorBuilder.SetInsertPoint(ExitBlock);
  } else {
    DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
  }
  DtorBuilder.CreateRetVoid();
  return ModuleDtorFunc;
}

CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
  return new CGNVCUDARuntime(CGM);
}

void CGNVCUDARuntime::internalizeDeviceSideVar(
    const VarDecl *D, llvm::GlobalValue::LinkageTypes &Linkage) {
  // For -fno-gpu-rdc, host-side shadows of external declarations of
  // device-side global variables become internal definitions. These have to
  // be internal in order to prevent name conflicts with global host variables
  // with the same name in different TUs.
  //
  // For -fgpu-rdc, the shadow variables should not be internalized because
  // they may be accessed by a different TU.
  if (CGM.getLangOpts().GPURelocatableDeviceCode)
    return;

  // __shared__ variables are odd. Shadows do get created, but
  // they are not registered with the CUDA runtime, so they
  // can't really be used to access their device-side
  // counterparts. It's not clear yet whether it's nvcc's bug or
  // a feature, but we've got to do the same for compatibility.
  if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
      D->hasAttr<CUDASharedAttr>() ||
      D->getType()->isCUDADeviceBuiltinSurfaceType() ||
      D->getType()->isCUDADeviceBuiltinTextureType()) {
    Linkage = llvm::GlobalValue::InternalLinkage;
  }
}

void CGNVCUDARuntime::handleVarRegistration(const VarDecl *D,
                                            llvm::GlobalVariable &GV) {
  if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
    // Shadow variables and their properties must be registered with the CUDA
    // runtime. Skip extern global variables, which will be registered in
    // the TU where they are defined.
    //
    // Don't register a C++17 inline variable. The local symbol can be
    // discarded and referencing a discarded local symbol from outside the
    // comdat (__cuda_register_globals) is disallowed by the ELF spec.
    //
    // HIP managed variables always need to be recorded in device and host
    // compilations for transformation.
    //
    // HIP managed variables and variables in CUDADeviceVarODRUsedByHost are
    // added to llvm.compiler.used, therefore they are safe to be registered.
    if ((!D->hasExternalStorage() && !D->isInline()) ||
        CGM.getContext().CUDADeviceVarODRUsedByHost.contains(D) ||
        D->hasAttr<HIPManagedAttr>()) {
      registerDeviceVar(D, GV, !D->hasDefinition(),
                        D->hasAttr<CUDAConstantAttr>());
    }
  } else if (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
             D->getType()->isCUDADeviceBuiltinTextureType()) {
    // Builtin surfaces and textures and their template arguments are
    // also registered with the CUDA runtime.
    const auto *TD = cast<ClassTemplateSpecializationDecl>(
        D->getType()->castAs<RecordType>()->getDecl());
    const TemplateArgumentList &Args = TD->getTemplateArgs();
    if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
      assert(Args.size() == 2 &&
             "Unexpected number of template arguments of CUDA device "
             "builtin surface type.");
      auto SurfType = Args[1].getAsIntegral();
      if (!D->hasExternalStorage())
        registerDeviceSurf(D, GV, !D->hasDefinition(), SurfType.getSExtValue());
    } else {
      assert(Args.size() == 3 &&
             "Unexpected number of template arguments of CUDA device "
             "builtin texture type.");
      auto TexType = Args[1].getAsIntegral();
      auto Normalized = Args[2].getAsIntegral();
      if (!D->hasExternalStorage())
        registerDeviceTex(D, GV, !D->hasDefinition(), TexType.getSExtValue(),
                          Normalized.getZExtValue());
    }
  }
}

// Transform managed variables to pointers to managed variables in device code.
// Each use of the original managed variable is replaced by a load from the
// transformed managed variable. The transformed managed variable contains
// the address of managed memory, which will be allocated by the runtime.
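// For example (a sketch; address-space annotations omitted), a managed
// variable in device IR
// \code
//   @x = global i32 0, align 4
// \endcode
// becomes
// \code
//   @x = externally_initialized global ptr null   ; filled in by the runtime
//   @x.managed = global i32 0, align 4            ; original allocation
// \endcode
// and every use of @x first loads the pointer stored in @x.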
void CGNVCUDARuntime::transformManagedVars() {
  for (auto &&Info : DeviceVars) {
    llvm::GlobalVariable *Var = Info.Var;
    if (Info.Flags.getKind() == DeviceVarFlags::Variable &&
        Info.Flags.isManaged()) {
      auto *ManagedVar = new llvm::GlobalVariable(
          CGM.getModule(), Var->getType(),
          /*isConstant=*/false, Var->getLinkage(),
          /*Init=*/Var->isDeclaration()
              ? nullptr
              : llvm::ConstantPointerNull::get(Var->getType()),
          /*Name=*/"", /*InsertBefore=*/nullptr,
          llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(LangAS::cuda_device));
      ManagedVar->setDSOLocal(Var->isDSOLocal());
      ManagedVar->setVisibility(Var->getVisibility());
      ManagedVar->setExternallyInitialized(true);
      replaceManagedVar(Var, ManagedVar);
      ManagedVar->takeName(Var);
      Var->setName(Twine(ManagedVar->getName()) + ".managed");
      // Keep managed variables even if they are not used in device code since
      // they need to be allocated by the runtime.
      if (!Var->isDeclaration()) {
        assert(!ManagedVar->isDeclaration());
        CGM.addCompilerUsedGlobal(Var);
        CGM.addCompilerUsedGlobal(ManagedVar);
      }
    }
  }
}

// Creates offloading entries for all the kernels and globals that must be
// registered. The linker will provide a pointer to this section so we can
// register the symbols with the linked device image.
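// Each entry is expected to match the offloading runtime's entry layout,
// roughly (a sketch of __tgt_offload_entry):
// \code
// struct __tgt_offload_entry {
//   void *addr;       // Kernel handle or variable address.
//   char *name;       // Device-side symbol name.
//   size_t size;      // Size in bytes; 0 for functions.
//   int32_t flags;    // A DeviceVarFlags::OffloadGlobal*Entry value.
//   int32_t reserved; // Unused.
// };
// \endcode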
void CGNVCUDARuntime::createOffloadingEntries() {
  llvm::OpenMPIRBuilder OMPBuilder(CGM.getModule());
  OMPBuilder.initialize();

  StringRef Section = CGM.getLangOpts().HIP ? "hip_offloading_entries"
                                            : "cuda_offloading_entries";
  for (KernelInfo &I : EmittedKernels)
    OMPBuilder.emitOffloadingEntry(KernelHandles[I.Kernel->getName()],
                                   getDeviceSideName(cast<NamedDecl>(I.D)), 0,
                                   DeviceVarFlags::OffloadGlobalEntry, Section);

  for (VarInfo &I : DeviceVars) {
    uint64_t VarSize =
        CGM.getDataLayout().getTypeAllocSize(I.Var->getValueType());
    if (I.Flags.getKind() == DeviceVarFlags::Variable) {
      OMPBuilder.emitOffloadingEntry(
          I.Var, getDeviceSideName(I.D), VarSize,
          I.Flags.isManaged() ? DeviceVarFlags::OffloadGlobalManagedEntry
                              : DeviceVarFlags::OffloadGlobalEntry,
          Section);
    } else if (I.Flags.getKind() == DeviceVarFlags::Surface) {
      OMPBuilder.emitOffloadingEntry(I.Var, getDeviceSideName(I.D), VarSize,
                                     DeviceVarFlags::OffloadGlobalSurfaceEntry,
                                     Section);
    } else if (I.Flags.getKind() == DeviceVarFlags::Texture) {
      OMPBuilder.emitOffloadingEntry(I.Var, getDeviceSideName(I.D), VarSize,
                                     DeviceVarFlags::OffloadGlobalTextureEntry,
                                     Section);
    }
  }
}

// Returns the module constructor to be added.
llvm::Function *CGNVCUDARuntime::finalizeModule() {
  if (CGM.getLangOpts().CUDAIsDevice) {
    transformManagedVars();

    // Mark ODR-used device variables as compiler used to prevent them from
    // being eliminated by optimization. This is necessary for device variables
    // ODR-used by host functions. Sema correctly marks them as ODR-used no
    // matter whether they are ODR-used by device or host functions.
    //
    // We do not need to do this if the variable has the used attribute, since
    // it has already been added.
    //
    // Static device variables have been externalized at this point, therefore
    // variables with LLVM private or internal linkage need not be added.
    for (auto &&Info : DeviceVars) {
      auto Kind = Info.Flags.getKind();
      if (!Info.Var->isDeclaration() &&
          !llvm::GlobalValue::isLocalLinkage(Info.Var->getLinkage()) &&
          (Kind == DeviceVarFlags::Variable ||
           Kind == DeviceVarFlags::Surface ||
           Kind == DeviceVarFlags::Texture) &&
          Info.D->isUsed() && !Info.D->hasAttr<UsedAttr>()) {
        CGM.addCompilerUsedGlobal(Info.Var);
      }
    }
    return nullptr;
  }
  if (CGM.getLangOpts().OffloadingNewDriver && RelocatableDeviceCode)
    createOffloadingEntries();
  else
    return makeModuleCtorFunction();

  return nullptr;
}

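// For CUDA the stub function itself serves as the kernel handle. For HIP a
// separate global variable, mangled as the kernel, is emitted and later
// initialized with the stub's address (see emitDeviceStub), giving the handle
// a stable address that can be shared across TUs. A sketch of the HIP case
// (illustrative names):
// \code
//   @kernel = constant ptr @__device_stub__kernel
// \endcode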
llvm::GlobalValue *CGNVCUDARuntime::getKernelHandle(llvm::Function *F,
                                                    GlobalDecl GD) {
  auto Loc = KernelHandles.find(F->getName());
  if (Loc != KernelHandles.end())
    return Loc->second;

  if (!CGM.getLangOpts().HIP) {
    KernelHandles[F->getName()] = F;
    KernelStubs[F] = F;
    return F;
  }

  auto *Var = new llvm::GlobalVariable(
      TheModule, F->getType(), /*isConstant=*/true, F->getLinkage(),
      /*Initializer=*/nullptr,
      CGM.getMangledName(
          GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel)));
  Var->setAlignment(CGM.getPointerAlign().getAsAlign());
  Var->setDSOLocal(F->isDSOLocal());
  Var->setVisibility(F->getVisibility());
  CGM.maybeSetTrivialComdat(*GD.getDecl(), *Var);
  KernelHandles[F->getName()] = Var;
  KernelStubs[Var] = F;
  return Var;
}