1 //===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code dealing with code generation of C++ declarations
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCXXABI.h"
14 #include "CGHLSLRuntime.h"
15 #include "CGObjCRuntime.h"
16 #include "CGOpenMPRuntime.h"
17 #include "CodeGenFunction.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/Attr.h"
20 #include "clang/Basic/LangOptions.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/MDBuilder.h"
24 #include "llvm/Support/Path.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
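/// Emit code to run the initializer of the given variable, which must have
/// global storage (or, under OpenCL C++, local storage), dispatching on the
/// evaluation kind of the variable's type.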
29 static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
30                          ConstantAddress DeclPtr) {
31   assert(
32       (D.hasGlobalStorage() ||
33        (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
34       "VarDecl must have global or local (in the case of OpenCL) storage!");
35   assert(!D.getType()->isReferenceType() &&
36          "Should not call EmitDeclInit on a reference!");
37 
38   QualType type = D.getType();
39   LValue lv = CGF.MakeAddrLValue(DeclPtr, type);
40 
41   const Expr *Init = D.getInit();
42   switch (CGF.getEvaluationKind(type)) {
43   case TEK_Scalar: {
44     CodeGenModule &CGM = CGF.CGM;
45     if (lv.isObjCStrong())
46       CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
47                                                 DeclPtr, D.getTLSKind());
48     else if (lv.isObjCWeak())
49       CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
50                                               DeclPtr);
51     else
52       CGF.EmitScalarInit(Init, &D, lv, false);
53     return;
54   }
55   case TEK_Complex:
56     CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
57     return;
58   case TEK_Aggregate:
59     CGF.EmitAggExpr(Init,
60                     AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
61                                             AggValueSlot::DoesNotNeedGCBarriers,
62                                             AggValueSlot::IsNotAliased,
63                                             AggValueSlot::DoesNotOverlap));
64     return;
65   }
66   llvm_unreachable("bad evaluation kind");
67 }
68 
69 /// Emit code to cause the destruction of the given variable with
70 /// static storage duration.
71 static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
72                             ConstantAddress Addr) {
73   // Honor __attribute__((no_destroy)) and bail instead of attempting
74   // to emit a reference to a possibly nonexistent destructor, which
75   // in turn can cause a crash. This will result in a global constructor
76   // that isn't balanced out by a destructor call as intended by the
77   // attribute. This also checks for -fno-c++-static-destructors and
78   // bails even if the attribute is not present.
79   QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());
80 
81   // FIXME:  __attribute__((cleanup)) ?
82 
83   switch (DtorKind) {
84   case QualType::DK_none:
85     return;
86 
87   case QualType::DK_cxx_destructor:
88     break;
89 
90   case QualType::DK_objc_strong_lifetime:
91   case QualType::DK_objc_weak_lifetime:
92   case QualType::DK_nontrivial_c_struct:
93     // We don't care about releasing objects during process teardown.
94     assert(!D.getTLSKind() && "should have rejected this");
95     return;
96   }
97 
98   llvm::FunctionCallee Func;
99   llvm::Constant *Argument;
100 
101   CodeGenModule &CGM = CGF.CGM;
102   QualType Type = D.getType();
103 
104   // Special-case non-array C++ destructors, if they have the right signature.
105   // Under some ABIs, destructors return this instead of void, and cannot be
106   // passed directly to __cxa_atexit if the target does not allow this
107   // mismatch.
108   const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
109   bool CanRegisterDestructor =
110       Record && (!CGM.getCXXABI().HasThisReturn(
111                      GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
112                  CGM.getCXXABI().canCallMismatchedFunctionType());
113   // If __cxa_atexit is disabled via a flag, a different helper function is
114   // generated elsewhere which uses atexit instead, and it takes the destructor
115   // directly.
116   bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
117   if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
118     assert(!Record->hasTrivialDestructor());
119     CXXDestructorDecl *Dtor = Record->getDestructor();
120 
121     Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
122     if (CGF.getContext().getLangOpts().OpenCL) {
123       auto DestAS =
124           CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
125       auto DestTy = llvm::PointerType::get(
126           CGM.getLLVMContext(), CGM.getContext().getTargetAddressSpace(DestAS));
127       auto SrcAS = D.getType().getQualifiers().getAddressSpace();
128       if (DestAS == SrcAS)
129         Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
130       else
131         // FIXME: On addr space mismatch we are passing NULL. The generation
132         // of the global destructor function should be adjusted accordingly.
133         Argument = llvm::ConstantPointerNull::get(DestTy);
134     } else {
135       Argument = Addr.getPointer();
136     }
137   // Otherwise, the standard logic requires a helper function.
138   } else {
139     Addr = Addr.withElementType(CGF.ConvertTypeForMem(Type));
140     Func = CodeGenFunction(CGM)
141            .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
142                                   CGF.needsEHCleanup(DtorKind), &D);
143     Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
144   }
145 
146   CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
147 }
148 
149 /// Emit code to cause the variable at the given address to be considered as
150 /// constant from this point onwards.
151 static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
152                               llvm::Constant *Addr) {
153   return CGF.EmitInvariantStart(
154       Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
155 }
156 
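/// Emit a call to the llvm.invariant.start intrinsic to mark the given memory
/// as unchanging from this point on. Does nothing when not optimizing.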
157 void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
158   // Do not emit the intrinsic if we're not optimizing.
159   if (!CGM.getCodeGenOpts().OptimizationLevel)
160     return;
161 
162   // Grab the llvm.invariant.start intrinsic.
163   llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
164   // Overloaded address space type.
165   llvm::Type *ObjectPtr[1] = {Int8PtrTy};
166   llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);
167 
168   // Emit a call with the size in bytes of the object.
169   uint64_t Width = Size.getQuantity();
170   llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
171                            llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
172   Builder.CreateCall(InvariantStart, Args);
173 }
174 
175 void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
176                                                llvm::GlobalVariable *GV,
177                                                bool PerformInit) {
178 
179   const Expr *Init = D.getInit();
180   QualType T = D.getType();
181 
182   // The address space of a static local variable (DeclPtr) may be different
183   // from the address space of the "this" argument of the constructor. In that
184   // case, we need an addrspacecast before calling the constructor.
185   //
186   // struct StructWithCtor {
187   //   __device__ StructWithCtor() {...}
188   // };
189   // __device__ void foo() {
190   //   __shared__ StructWithCtor s;
191   //   ...
192   // }
193   //
194   // For example, in the above CUDA code, the static local variable s has a
195   // "shared" address space qualifier, but the constructor of StructWithCtor
196   // expects "this" in the "generic" address space.
197   unsigned ExpectedAddrSpace = getTypes().getTargetAddressSpace(T);
198   unsigned ActualAddrSpace = GV->getAddressSpace();
199   llvm::Constant *DeclPtr = GV;
200   if (ActualAddrSpace != ExpectedAddrSpace) {
201     llvm::PointerType *PTy =
202         llvm::PointerType::get(getLLVMContext(), ExpectedAddrSpace);
203     DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
204   }
205 
206   ConstantAddress DeclAddr(
207       DeclPtr, GV->getValueType(), getContext().getDeclAlign(&D));
208 
209   if (!T->isReferenceType()) {
210     if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
211         D.hasAttr<OMPThreadPrivateDeclAttr>()) {
212       (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
213           &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
214           PerformInit, this);
215     }
216     bool NeedsDtor =
217         D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
218     if (PerformInit)
219       EmitDeclInit(*this, D, DeclAddr);
220     if (CGM.isTypeConstant(D.getType(), true, !NeedsDtor))
221       EmitDeclInvariant(*this, D, DeclPtr);
222     else
223       EmitDeclDestroy(*this, D, DeclAddr);
224     return;
225   }
226 
227   assert(PerformInit && "cannot have constant initializer which needs "
228          "destruction for reference");
229   RValue RV = EmitReferenceBindingToExpr(Init);
230   EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
231 }
232 
233 /// Create a stub function, suitable for being passed to atexit,
234 /// which passes the given address to the given destructor function.
235 llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
236                                                   llvm::FunctionCallee dtor,
237                                                   llvm::Constant *addr) {
238   // Get the destructor function type, void(*)(void).
239   llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
240   SmallString<256> FnName;
241   {
242     llvm::raw_svector_ostream Out(FnName);
243     CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
244   }
245 
246   const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
247   llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
248       ty, FnName.str(), FI, VD.getLocation());
249 
250   CodeGenFunction CGF(CGM);
251 
252   CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
253                     CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
254                     VD.getLocation(), VD.getInit()->getExprLoc());
255   // Emit an artificial location for this function.
256   auto AL = ApplyDebugLocation::CreateArtificial(CGF);
257 
258   llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
259 
260   // Make sure the call and the callee agree on calling convention.
261   if (auto *dtorFn = dyn_cast<llvm::Function>(
262           dtor.getCallee()->stripPointerCastsAndAliases()))
263     call->setCallingConv(dtorFn->getCallingConv());
264 
265   CGF.FinishFunction();
266 
267   return fn;
268 }
269 
270 /// Create a stub function, suitable for being passed to __pt_atexit_np,
271 /// which passes the given address to the given destructor function.
272 llvm::Function *CodeGenFunction::createTLSAtExitStub(
273     const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr,
274     llvm::FunctionCallee &AtExit) {
275   SmallString<256> FnName;
276   {
277     llvm::raw_svector_ostream Out(FnName);
278     CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&D, Out);
279   }
280 
281   const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
282       getContext().IntTy, FnInfoOpts::None, {getContext().IntTy},
283       FunctionType::ExtInfo(), {}, RequiredArgs::All);
284 
285   // Get the stub function type, int(*)(int,...).
286   llvm::FunctionType *StubTy =
287       llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy}, true);
288 
289   llvm::Function *DtorStub = CGM.CreateGlobalInitOrCleanUpFunction(
290       StubTy, FnName.str(), FI, D.getLocation());
291 
292   CodeGenFunction CGF(CGM);
293 
294   FunctionArgList Args;
295   ImplicitParamDecl IPD(CGM.getContext(), CGM.getContext().IntTy,
296                         ImplicitParamDecl::Other);
297   Args.push_back(&IPD);
298   QualType ResTy = CGM.getContext().IntTy;
299 
300   CGF.StartFunction(GlobalDecl(&D, DynamicInitKind::AtExit), ResTy, DtorStub,
301                     FI, Args, D.getLocation(), D.getInit()->getExprLoc());
302 
303   // Emit an artificial location for this function.
304   auto AL = ApplyDebugLocation::CreateArtificial(CGF);
305 
306   llvm::CallInst *call = CGF.Builder.CreateCall(Dtor, Addr);
307 
308   // Make sure the call and the callee agree on calling convention.
309   if (auto *DtorFn = dyn_cast<llvm::Function>(
310           Dtor.getCallee()->stripPointerCastsAndAliases()))
311     call->setCallingConv(DtorFn->getCallingConv());
312 
  // Return 0 from the function.
314   CGF.Builder.CreateStore(llvm::Constant::getNullValue(CGM.IntTy),
315                           CGF.ReturnValue);
316 
317   CGF.FinishFunction();
318 
319   return DtorStub;
320 }
321 
322 /// Register a global destructor using the C atexit runtime function.
323 void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
324                                                    llvm::FunctionCallee dtor,
325                                                    llvm::Constant *addr) {
326   // Create a function which calls the destructor.
327   llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
328   registerGlobalDtorWithAtExit(dtorStub);
329 }
330 
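/// Register the given destructor stub to run at program exit by emitting a
/// call to the C atexit runtime function.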
331 void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
332   // extern "C" int atexit(void (*f)(void));
333   assert(dtorStub->getType() ==
334              llvm::PointerType::get(
335                  llvm::FunctionType::get(CGM.VoidTy, false),
336                  dtorStub->getType()->getPointerAddressSpace()) &&
337          "Argument to atexit has a wrong type.");
338 
339   llvm::FunctionType *atexitTy =
340       llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
341 
342   llvm::FunctionCallee atexit =
343       CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
344                                 /*Local=*/true);
345   if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
346     atexitFn->setDoesNotThrow();
347 
348   EmitNounwindRuntimeCall(atexit, dtorStub);
349 }
350 
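/// Unregister a destructor stub that was previously registered with atexit,
/// using the unatexit runtime function; returns its result (0 on success,
/// non-zero otherwise).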
351 llvm::Value *
352 CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
353   // The unatexit subroutine unregisters __dtor functions that were previously
354   // registered by the atexit subroutine. If the referenced function is found,
355   // it is removed from the list of functions that are called at normal program
  // termination and unatexit returns a value of 0; otherwise, a non-zero
357   // value is returned.
358   //
359   // extern "C" int unatexit(void (*f)(void));
360   assert(dtorStub->getType() ==
361              llvm::PointerType::get(
362                  llvm::FunctionType::get(CGM.VoidTy, false),
363                  dtorStub->getType()->getPointerAddressSpace()) &&
364          "Argument to unatexit has a wrong type.");
365 
366   llvm::FunctionType *unatexitTy =
367       llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);
368 
369   llvm::FunctionCallee unatexit =
370       CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());
371 
372   cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();
373 
374   return EmitNounwindRuntimeCall(unatexit, dtorStub);
375 }
376 
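/// Emit the guarded initialization of a global variable by delegating to the
/// target C++ ABI, diagnosing the case where guard variables are forbidden.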
377 void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
378                                          llvm::GlobalVariable *DeclPtr,
379                                          bool PerformInit) {
380   // If we've been asked to forbid guard variables, emit an error now.
381   // This diagnostic is hard-coded for Darwin's use case;  we can find
382   // better phrasing if someone else needs it.
383   if (CGM.getCodeGenOpts().ForbidGuardVariables)
384     CGM.Error(D.getLocation(),
385               "this initialization requires a guard variable, which "
386               "the kernel does not support");
387 
388   CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
389 }
390 
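/// Emit a conditional branch to the initialization block or the skip block of
/// a guarded initializer, attaching branch weights (where we can guess them)
/// to reflect how rarely the initializer is expected to run.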
391 void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
392                                                llvm::BasicBlock *InitBlock,
393                                                llvm::BasicBlock *NoInitBlock,
394                                                GuardKind Kind,
395                                                const VarDecl *D) {
396   assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");
397 
398   // A guess at how many times we will enter the initialization of a
399   // variable, depending on the kind of variable.
400   static const uint64_t InitsPerTLSVar = 1024;
401   static const uint64_t InitsPerLocalVar = 1024 * 1024;
402 
403   llvm::MDNode *Weights;
404   if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
405     // For non-local variables, don't apply any weighting for now. Due to our
406     // use of COMDATs, we expect there to be at most one initialization of the
407     // variable per DSO, but we have no way to know how many DSOs will try to
408     // initialize the variable.
409     Weights = nullptr;
410   } else {
411     uint64_t NumInits;
412     // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
414     if (Kind == GuardKind::TlsGuard || D->getTLSKind())
415       NumInits = InitsPerTLSVar;
416     else
417       NumInits = InitsPerLocalVar;
418 
419     // The probability of us entering the initializer is
420     //   1 / (total number of times we attempt to initialize the variable).
421     llvm::MDBuilder MDHelper(CGM.getLLVMContext());
422     Weights = MDHelper.createBranchWeights(1, NumInits - 1);
423   }
424 
425   Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
426 }
427 
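/// Create a helper function for global initialization or cleanup with the
/// given type and name, setting the section, linkage, calling convention, and
/// sanitizer-related attributes appropriately.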
428 llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
429     llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
430     SourceLocation Loc, bool TLS, llvm::GlobalVariable::LinkageTypes Linkage) {
431   llvm::Function *Fn = llvm::Function::Create(FTy, Linkage, Name, &getModule());
432 
433   if (!getLangOpts().AppleKext && !TLS) {
434     // Set the section if needed.
435     if (const char *Section = getTarget().getStaticInitSectionSpecifier())
436       Fn->setSection(Section);
437   }
438 
439   if (Linkage == llvm::GlobalVariable::InternalLinkage)
440     SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
441 
442   Fn->setCallingConv(getRuntimeCC());
443 
444   if (!getLangOpts().Exceptions)
445     Fn->setDoesNotThrow();
446 
447   if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
448       !isInNoSanitizeList(SanitizerKind::Address, Fn, Loc))
449     Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
450 
451   if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
452       !isInNoSanitizeList(SanitizerKind::KernelAddress, Fn, Loc))
453     Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
454 
455   if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
456       !isInNoSanitizeList(SanitizerKind::HWAddress, Fn, Loc))
457     Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
458 
459   if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
460       !isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
461     Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
462 
463   if (getLangOpts().Sanitize.has(SanitizerKind::MemtagStack) &&
464       !isInNoSanitizeList(SanitizerKind::MemtagStack, Fn, Loc))
465     Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
466 
467   if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
468       !isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
469     Fn->addFnAttr(llvm::Attribute::SanitizeThread);
470 
471   if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
472       !isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
473     Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
474 
475   if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
476       !isInNoSanitizeList(SanitizerKind::KernelMemory, Fn, Loc))
477     Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
478 
479   if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
480       !isInNoSanitizeList(SanitizerKind::SafeStack, Fn, Loc))
481     Fn->addFnAttr(llvm::Attribute::SafeStack);
482 
483   if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
484       !isInNoSanitizeList(SanitizerKind::ShadowCallStack, Fn, Loc))
485     Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
486 
487   return Fn;
488 }
489 
490 /// Create a global pointer to a function that will initialize a global
491 /// variable.  The user has requested that this pointer be emitted in a specific
492 /// section.
493 void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
494                                           llvm::GlobalVariable *GV,
495                                           llvm::Function *InitFunc,
496                                           InitSegAttr *ISA) {
497   llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
498       TheModule, InitFunc->getType(), /*isConstant=*/true,
499       llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
500   PtrArray->setSection(ISA->getSection());
501   addUsedGlobal(PtrArray);
502 
503   // If the GV is already in a comdat group, then we have to join it.
504   if (llvm::Comdat *C = GV->getComdat())
505     PtrArray->setComdat(C);
506 }
507 
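/// Emit the dynamic initializer function for the given global variable and
/// register it to run at the appropriate time: via llvm.global_ctors, an
/// init_seg section, the thread_local init list, or the deferred global init
/// list.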
508 void
509 CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
510                                             llvm::GlobalVariable *Addr,
511                                             bool PerformInit) {
512 
  // According to E.2.3.1 in the CUDA 7.5 Programming Guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope that are
  // of class type cannot have a non-empty constructor. All the checks have
  // been done in Sema by now. Whatever initializers are allowed are empty and
  // we just need to ignore them here.
518   if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
519       (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
520        D->hasAttr<CUDASharedAttr>()))
521     return;
522 
523   if (getLangOpts().OpenMP &&
524       getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
525     return;
526 
527   // Check if we've already initialized this decl.
528   auto I = DelayedCXXInitPosition.find(D);
529   if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
530     return;
531 
532   llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
533   SmallString<256> FnName;
534   {
535     llvm::raw_svector_ostream Out(FnName);
536     getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
537   }
538 
539   // Create a variable initialization function.
540   llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
541       FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());
542 
543   auto *ISA = D->getAttr<InitSegAttr>();
544   CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
545                                                           PerformInit);
546 
547   llvm::GlobalVariable *COMDATKey =
548       supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;
549 
550   if (D->getTLSKind()) {
551     // FIXME: Should we support init_priority for thread_local?
552     // FIXME: We only need to register one __cxa_thread_atexit function for the
553     // entire TU.
554     CXXThreadLocalInits.push_back(Fn);
555     CXXThreadLocalInitVars.push_back(D);
556   } else if (PerformInit && ISA) {
557     // Contract with backend that "init_seg(compiler)" corresponds to priority
558     // 200 and "init_seg(lib)" corresponds to priority 400.
559     int Priority = -1;
560     if (ISA->getSection() == ".CRT$XCC")
561       Priority = 200;
562     else if (ISA->getSection() == ".CRT$XCL")
563       Priority = 400;
564 
565     if (Priority != -1)
566       AddGlobalCtor(Fn, Priority, ~0U, COMDATKey);
567     else
568       EmitPointerToInitFunc(D, Addr, Fn, ISA);
569   } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
570     OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
571                                           PrioritizedCXXGlobalInits.size());
572     PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
573   } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
574              getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR ||
575              D->hasAttr<SelectAnyAttr>()) {
576     // C++ [basic.start.init]p2:
577     //   Definitions of explicitly specialized class template static data
578     //   members have ordered initialization. Other class template static data
579     //   members (i.e., implicitly or explicitly instantiated specializations)
580     //   have unordered initialization.
581     //
582     // As a consequence, we can put them into their own llvm.global_ctors entry.
583     //
584     // If the global is externally visible, put the initializer into a COMDAT
585     // group with the global being initialized.  On most platforms, this is a
586     // minor startup time optimization.  In the MS C++ ABI, there are no guard
587     // variables, so this COMDAT key is required for correctness.
588     //
589     // SelectAny globals will be comdat-folded. Put the initializer into a
590     // COMDAT group associated with the global, so the initializers get folded
591     // too.
592     I = DelayedCXXInitPosition.find(D);
    // CXXGlobalInits.size() is the lex order number for the next deferred
    // VarDecl. Use it when the current VarDecl is non-deferred. Although this
    // lex order number is shared between the current VarDecl and some
    // following VarDecls, their order of insertion into `llvm.global_ctors` is
    // the same as the lexing order, and the subsequent stable sort preserves
    // that order.
599     unsigned LexOrder =
600         I == DelayedCXXInitPosition.end() ? CXXGlobalInits.size() : I->second;
601     AddGlobalCtor(Fn, 65535, LexOrder, COMDATKey);
602     if (COMDATKey && (getTriple().isOSBinFormatELF() ||
603                       getTarget().getCXXABI().isMicrosoft())) {
604       // When COMDAT is used on ELF or in the MS C++ ABI, the key must be in
605       // llvm.used to prevent linker GC.
606       addUsedGlobal(COMDATKey);
607     }
608 
609     // If we used a COMDAT key for the global ctor, the init function can be
610     // discarded if the global ctor entry is discarded.
611     // FIXME: Do we need to restrict this to ELF and Wasm?
612     llvm::Comdat *C = Addr->getComdat();
613     if (COMDATKey && C &&
614         (getTarget().getTriple().isOSBinFormatELF() ||
615          getTarget().getTriple().isOSBinFormatWasm())) {
616       Fn->setComdat(C);
617     }
618   } else {
619     I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
620     if (I == DelayedCXXInitPosition.end()) {
621       CXXGlobalInits.push_back(Fn);
622     } else if (I->second != ~0U) {
623       assert(I->second < CXXGlobalInits.size() &&
624              CXXGlobalInits[I->second] == nullptr);
625       CXXGlobalInits[I->second] = Fn;
626     }
627   }
628 
629   // Remember that we already emitted the initializer for this global.
630   DelayedCXXInitPosition[D] = ~0U;
631 }
632 
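/// Hand the collected thread_local initializer functions and variables off to
/// the target C++ ABI, then clear the bookkeeping lists.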
633 void CodeGenModule::EmitCXXThreadLocalInitFunc() {
634   getCXXABI().EmitThreadLocalInitFuncs(
635       *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);
636 
637   CXXThreadLocalInits.clear();
638   CXXThreadLocalInitVars.clear();
639   CXXThreadLocals.clear();
640 }
641 
/* Build the initializer for a C++20 module:
   This is arranged to be run only once regardless of how many times the module
   might be imported transitively.  This is arranged by using a guard variable.

   If there are no initializers at all (and also no imported modules) we reduce
   this to an empty function (since the Itanium ABI requires that this function
   be available to a caller, which might be produced by a different
   implementation).

   First we call any initializers for imported modules.
   We then call initializers for the Global Module Fragment (if present).
   We then call initializers for the current module.
   We then call initializers for the Private Module Fragment (if present).
*/
656 
657 void CodeGenModule::EmitCXXModuleInitFunc(Module *Primary) {
658   while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
659     CXXGlobalInits.pop_back();
660 
661   // As noted above, we create the function, even if it is empty.
662   // Module initializers for imported modules are emitted first.
663 
664   // Collect all the modules that we import
665   SmallVector<Module *> AllImports;
666   // Ones that we export
667   for (auto I : Primary->Exports)
668     AllImports.push_back(I.getPointer());
669   // Ones that we only import.
670   for (Module *M : Primary->Imports)
671     AllImports.push_back(M);
672 
673   SmallVector<llvm::Function *, 8> ModuleInits;
674   for (Module *M : AllImports) {
    // No Itanium initializers in header-like modules.
676     if (M->isHeaderLikeModule())
677       continue; // TODO: warn of mixed use of module map modules and C++20?
678     llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
679     SmallString<256> FnName;
680     {
681       llvm::raw_svector_ostream Out(FnName);
682       cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
683           .mangleModuleInitializer(M, Out);
684     }
685     assert(!GetGlobalValue(FnName.str()) &&
686            "We should only have one use of the initializer call");
687     llvm::Function *Fn = llvm::Function::Create(
688         FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
689     ModuleInits.push_back(Fn);
690   }
691 
  // Add any initializers with specified priority; this uses the same approach
  // as EmitCXXGlobalInitFunc().
694   if (!PrioritizedCXXGlobalInits.empty()) {
695     SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
696     llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
697                          PrioritizedCXXGlobalInits.end());
698     for (SmallVectorImpl<GlobalInitData>::iterator
699              I = PrioritizedCXXGlobalInits.begin(),
700              E = PrioritizedCXXGlobalInits.end();
701          I != E;) {
702       SmallVectorImpl<GlobalInitData>::iterator PrioE =
703           std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());
704 
705       for (; I < PrioE; ++I)
706         ModuleInits.push_back(I->second);
707     }
708   }
709 
710   // Now append the ones without specified priority.
711   for (auto *F : CXXGlobalInits)
712     ModuleInits.push_back(F);
713 
714   llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
715   const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
716 
717   // We now build the initializer for this module, which has a mangled name
  // as per the Itanium ABI.  The action of the initializer is guarded so that
719   // each init is run just once (even though a module might be imported
720   // multiple times via nested use).
721   llvm::Function *Fn;
722   {
723     SmallString<256> InitFnName;
724     llvm::raw_svector_ostream Out(InitFnName);
725     cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
726         .mangleModuleInitializer(Primary, Out);
727     Fn = CreateGlobalInitOrCleanUpFunction(
728         FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
729         llvm::GlobalVariable::ExternalLinkage);
730 
731     // If we have a completely empty initializer then we do not want to create
732     // the guard variable.
733     ConstantAddress GuardAddr = ConstantAddress::invalid();
734     if (!AllImports.empty() || !PrioritizedCXXGlobalInits.empty() ||
735         !CXXGlobalInits.empty()) {
736       // Create the guard var.
737       llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
738           getModule(), Int8Ty, /*isConstant=*/false,
739           llvm::GlobalVariable::InternalLinkage,
740           llvm::ConstantInt::get(Int8Ty, 0), InitFnName.str() + "__in_chrg");
741       CharUnits GuardAlign = CharUnits::One();
742       Guard->setAlignment(GuardAlign.getAsAlign());
743       GuardAddr = ConstantAddress(Guard, Int8Ty, GuardAlign);
744     }
745     CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits,
746                                                      GuardAddr);
747   }
748 
749   // We allow for the case that a module object is added to a linked binary
  // without a specific call to the initializer.  This also ensures that
751   // implementation partition initializers are called when the partition
752   // is not imported as an interface.
753   AddGlobalCtor(Fn);
754 
755   // See the comment in EmitCXXGlobalInitFunc about OpenCL global init
756   // functions.
757   if (getLangOpts().OpenCL) {
758     GenKernelArgMetadata(Fn);
759     Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
760   }
761 
762   assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
763          getLangOpts().GPUAllowDeviceInit);
764   if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
765     Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
766     Fn->addFnAttr("device-init");
767   }
768 
769   // We are done with the inits.
770   AllImports.clear();
771   PrioritizedCXXGlobalInits.clear();
772   CXXGlobalInits.clear();
773   ModuleInits.clear();
774 }
775 
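/// Transform the main file name of the module into a string that can be
/// embedded in a symbol name, replacing any character outside [a-zA-Z0-9._]
/// with '_' (e.g. "foo-bar.cpp" becomes "foo_bar.cpp").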
776 static SmallString<128> getTransformedFileName(llvm::Module &M) {
777   SmallString<128> FileName = llvm::sys::path::filename(M.getName());
778 
779   if (FileName.empty())
780     FileName = "<null>";
781 
782   for (size_t i = 0; i < FileName.size(); ++i) {
783     // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
784     // to be the set of C preprocessing numbers.
785     if (!isPreprocessingNumberBody(FileName[i]))
786       FileName[i] = '_';
787   }
788 
789   return FileName;
790 }
791 
792 static std::string getPrioritySuffix(unsigned int Priority) {
793   assert(Priority <= 65535 && "Priority should always be <= 65535.");
794 
795   // Compute the function suffix from priority. Prepend with zeroes to make
796   // sure the function names are also ordered as priorities.
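  // For example, a priority of 200 yields the suffix "000200".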
797   std::string PrioritySuffix = llvm::utostr(Priority);
798   PrioritySuffix = std::string(6 - PrioritySuffix.size(), '0') + PrioritySuffix;
799 
800   return PrioritySuffix;
801 }
802 
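/// Emit the global initialization functions for this translation unit: one
/// function per init_priority bucket, plus a default-priority function for the
/// remaining initializers. Initializers of imported C++20 modules, if any, are
/// scheduled to run before this TU's own initializers.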
803 void
804 CodeGenModule::EmitCXXGlobalInitFunc() {
805   while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
806     CXXGlobalInits.pop_back();
807 
808   // When we import C++20 modules, we must run their initializers first.
809   SmallVector<llvm::Function *, 8> ModuleInits;
810   if (CXX20ModuleInits)
811     for (Module *M : ImportedModules) {
      // No Itanium initializers in header-like modules.
813       if (M->isHeaderLikeModule())
814         continue;
815       llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
816       SmallString<256> FnName;
817       {
818         llvm::raw_svector_ostream Out(FnName);
819         cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
820             .mangleModuleInitializer(M, Out);
821       }
822       assert(!GetGlobalValue(FnName.str()) &&
823              "We should only have one use of the initializer call");
824       llvm::Function *Fn = llvm::Function::Create(
825           FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
826       ModuleInits.push_back(Fn);
827     }
828 
829   if (ModuleInits.empty() && CXXGlobalInits.empty() &&
830       PrioritizedCXXGlobalInits.empty())
831     return;
832 
833   llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
834   const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
835 
836   // Create our global prioritized initialization function.
837   if (!PrioritizedCXXGlobalInits.empty()) {
838     SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
839     llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
840                          PrioritizedCXXGlobalInits.end());
841     // Iterate over "chunks" of ctors with same priority and emit each chunk
842     // into separate function. Note - everything is sorted first by priority,
843     // second - by lex order, so we emit ctor functions in proper order.
    for (SmallVectorImpl<GlobalInitData>::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end();
         I != E;) {
      SmallVectorImpl<GlobalInitData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());
849 
850       LocalCXXGlobalInits.clear();
851 
852       unsigned int Priority = I->first.priority;
853       llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
854           FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);
855 
856       // Prepend the module inits to the highest priority set.
857       if (!ModuleInits.empty()) {
858         for (auto *F : ModuleInits)
859           LocalCXXGlobalInits.push_back(F);
860         ModuleInits.clear();
861       }
862 
863       for (; I < PrioE; ++I)
864         LocalCXXGlobalInits.push_back(I->second);
865 
866       CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
867       AddGlobalCtor(Fn, Priority);
868     }
869     PrioritizedCXXGlobalInits.clear();
870   }
871 
872   if (getCXXABI().useSinitAndSterm() && ModuleInits.empty() &&
873       CXXGlobalInits.empty())
874     return;
875 
876   for (auto *F : CXXGlobalInits)
877     ModuleInits.push_back(F);
878   CXXGlobalInits.clear();
879 
880   // Include the filename in the symbol name. Including "sub_" matches gcc
881   // and makes sure these symbols appear lexicographically behind the symbols
882   // with priority emitted above.  Module implementation units behave the same
883   // way as a non-modular TU with imports.
884   llvm::Function *Fn;
885   if (CXX20ModuleInits && getContext().getCurrentNamedModule() &&
886       !getContext().getCurrentNamedModule()->isModuleImplementation()) {
887     SmallString<256> InitFnName;
888     llvm::raw_svector_ostream Out(InitFnName);
889     cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
890         .mangleModuleInitializer(getContext().getCurrentNamedModule(), Out);
891     Fn = CreateGlobalInitOrCleanUpFunction(
892         FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
893         llvm::GlobalVariable::ExternalLinkage);
894   } else
895     Fn = CreateGlobalInitOrCleanUpFunction(
896         FTy,
897         llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
898         FI);
899 
900   CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits);
901   AddGlobalCtor(Fn);
902 
  // In OpenCL, global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // The current initialization function makes use of function pointer
  // callbacks; we can't support function pointers, especially between host and
  // device. However, global destruction has little meaning without any dynamic
  // resource allocation on the device, and program-scope variables are
  // destroyed by the runtime when the program is released.
911   if (getLangOpts().OpenCL) {
912     GenKernelArgMetadata(Fn);
913     Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
914   }
915 
916   assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
917          getLangOpts().GPUAllowDeviceInit);
918   if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
919     Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
920     Fn->addFnAttr("device-init");
921   }
922 
923   ModuleInits.clear();
924 }
925 
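/// Emit the global cleanup functions for this translation unit: one function
/// per priority bucket of sterm finalizers, plus a default function
/// (_GLOBAL__D_a) for the remaining destructors and finalizers.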
926 void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
927   if (CXXGlobalDtorsOrStermFinalizers.empty() &&
928       PrioritizedCXXStermFinalizers.empty())
929     return;
930 
931   llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
932   const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
933 
934   // Create our global prioritized cleanup function.
935   if (!PrioritizedCXXStermFinalizers.empty()) {
936     SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
937     llvm::array_pod_sort(PrioritizedCXXStermFinalizers.begin(),
938                          PrioritizedCXXStermFinalizers.end());
939     // Iterate over "chunks" of dtors with same priority and emit each chunk
940     // into separate function. Note - everything is sorted first by priority,
941     // second - by lex order, so we emit dtor functions in proper order.
942     for (SmallVectorImpl<StermFinalizerData>::iterator
943              I = PrioritizedCXXStermFinalizers.begin(),
944              E = PrioritizedCXXStermFinalizers.end();
945          I != E;) {
946       SmallVectorImpl<StermFinalizerData>::iterator PrioE =
947           std::upper_bound(I + 1, E, *I, StermFinalizerPriorityCmp());
948 
949       LocalCXXStermFinalizers.clear();
950 
951       unsigned int Priority = I->first.priority;
952       llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
953           FTy, "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);
954 
955       for (; I < PrioE; ++I) {
956         llvm::FunctionCallee DtorFn = I->second;
957         LocalCXXStermFinalizers.emplace_back(DtorFn.getFunctionType(),
958                                              DtorFn.getCallee(), nullptr);
959       }
960 
961       CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
962           Fn, LocalCXXStermFinalizers);
963       AddGlobalDtor(Fn, Priority);
964     }
965     PrioritizedCXXStermFinalizers.clear();
966   }
967 
968   if (CXXGlobalDtorsOrStermFinalizers.empty())
969     return;
970 
971   // Create our global cleanup function.
972   llvm::Function *Fn =
973       CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);
974 
975   CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
976       Fn, CXXGlobalDtorsOrStermFinalizers);
977   AddGlobalDtor(Fn);
978   CXXGlobalDtorsOrStermFinalizers.clear();
979 }
980 
981 /// Emit the code necessary to initialize the given global variable.
982 void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
983                                                        const VarDecl *D,
984                                                  llvm::GlobalVariable *Addr,
985                                                        bool PerformInit) {
986   // Check if we need to emit debug info for variable initializer.
987   if (D->hasAttr<NoDebugAttr>())
988     DebugInfo = nullptr; // disable debug info indefinitely for this function
989 
990   CurEHLocation = D->getBeginLoc();
991 
992   StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
993                 getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
994                 FunctionArgList());
995   // Emit an artificial location for this function.
996   auto AL = ApplyDebugLocation::CreateArtificial(*this);
997 
998   // Use guarded initialization if the global variable is weak. This
999   // occurs for, e.g., instantiated static data members and
1000   // definitions explicitly marked weak.
1001   //
1002   // Also use guarded initialization for a variable with dynamic TLS and
1003   // unordered initialization. (If the initialization is ordered, the ABI
1004   // layer will guard the whole-TU initialization for us.)
1005   if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
1006       (D->getTLSKind() == VarDecl::TLS_Dynamic &&
1007        isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
1008     EmitCXXGuardedInit(*D, Addr, PerformInit);
1009   } else {
1010     EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
1011   }
1012 
1013   if (getLangOpts().HLSL)
1014     CGM.getHLSLRuntime().annotateHLSLResource(D, Addr);
1015 
1016   FinishFunction();
1017 }
1018 
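/// Generate the body of a global initialization function that calls each of
/// the given initializer functions in turn, optionally guarded by the given
/// guard variable so the initializers run at most once.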
1019 void
1020 CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
1021                                            ArrayRef<llvm::Function *> Decls,
1022                                            ConstantAddress Guard) {
1023   {
1024     auto NL = ApplyDebugLocation::CreateEmpty(*this);
1025     StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
1026                   getTypes().arrangeNullaryFunction(), FunctionArgList());
1027     // Emit an artificial location for this function.
1028     auto AL = ApplyDebugLocation::CreateArtificial(*this);
1029 
1030     llvm::BasicBlock *ExitBlock = nullptr;
1031     if (Guard.isValid()) {
1032       // If we have a guard variable, check whether we've already performed
1033       // these initializations. This happens for TLS initialization functions.
1034       llvm::Value *GuardVal = Builder.CreateLoad(Guard);
1035       llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
1036                                                  "guard.uninitialized");
1037       llvm::BasicBlock *InitBlock = createBasicBlock("init");
1038       ExitBlock = createBasicBlock("exit");
1039       EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
1040                                GuardKind::TlsGuard, nullptr);
1041       EmitBlock(InitBlock);
1042       // Mark as initialized before initializing anything else. If the
1043       // initializers use previously-initialized thread_local vars, that's
1044       // probably supposed to be OK, but the standard doesn't say.
1045       Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
1046 
1047       // The guard variable can't ever change again.
1048       EmitInvariantStart(
1049           Guard.getPointer(),
1050           CharUnits::fromQuantity(
1051               CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
1052     }
1053 
1054     RunCleanupsScope Scope(*this);
1055 
1056     // When building in Objective-C++ ARC mode, create an autorelease pool
1057     // around the global initializers.
1058     if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
1059       llvm::Value *token = EmitObjCAutoreleasePoolPush();
1060       EmitObjCAutoreleasePoolCleanup(token);
1061     }
1062 
1063     for (unsigned i = 0, e = Decls.size(); i != e; ++i)
1064       if (Decls[i])
1065         EmitRuntimeCall(Decls[i]);
1066 
1067     Scope.ForceCleanup();
1068 
1069     if (ExitBlock) {
1070       Builder.CreateBr(ExitBlock);
1071       EmitBlock(ExitBlock);
1072     }
1073   }
1074 
1075   FinishFunction();
1076 }
1077 
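/// Generate the body of a global cleanup function that calls the given
/// destructors or sterm finalizers in reverse order of registration.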
1078 void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
1079     llvm::Function *Fn,
1080     ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
1081                         llvm::Constant *>>
1082         DtorsOrStermFinalizers) {
1083   {
1084     auto NL = ApplyDebugLocation::CreateEmpty(*this);
1085     StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
1086                   getTypes().arrangeNullaryFunction(), FunctionArgList());
1087     // Emit an artificial location for this function.
1088     auto AL = ApplyDebugLocation::CreateArtificial(*this);
1089 
1090     // Emit the cleanups, in reverse order from construction.
1091     for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
1092       llvm::FunctionType *CalleeTy;
1093       llvm::Value *Callee;
1094       llvm::Constant *Arg;
1095       std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];
1096 
1097       llvm::CallInst *CI = nullptr;
1098       if (Arg == nullptr) {
1099         assert(
1100             CGM.getCXXABI().useSinitAndSterm() &&
1101             "Arg could not be nullptr unless using sinit and sterm functions.");
1102         CI = Builder.CreateCall(CalleeTy, Callee);
1103       } else
1104         CI = Builder.CreateCall(CalleeTy, Callee, Arg);
1105 
1106       // Make sure the call and the callee agree on calling convention.
1107       if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
1108         CI->setCallingConv(F->getCallingConv());
1109     }
1110   }
1111 
1112   FinishFunction();
1113 }
1114 
1115 /// generateDestroyHelper - Generates a helper function which, when
1116 /// invoked, destroys the given object.  The address of the object
1117 /// should be in global memory.
1118 llvm::Function *CodeGenFunction::generateDestroyHelper(
1119     Address addr, QualType type, Destroyer *destroyer,
1120     bool useEHCleanupForArray, const VarDecl *VD) {
1121   FunctionArgList args;
1122   ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
1123                         ImplicitParamDecl::Other);
1124   args.push_back(&Dst);
1125 
1126   const CGFunctionInfo &FI =
1127     CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
1128   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1129   llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
1130       FTy, "__cxx_global_array_dtor", FI, VD->getLocation());
1131 
1132   CurEHLocation = VD->getBeginLoc();
1133 
1134   StartFunction(GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor),
1135                 getContext().VoidTy, fn, FI, args);
1136   // Emit an artificial location for this function.
1137   auto AL = ApplyDebugLocation::CreateArtificial(*this);
1138 
1139   emitDestroy(addr, type, destroyer, useEHCleanupForArray);
1140 
1141   FinishFunction();
1142 
1143   return fn;
1144 }
1145