//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

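/// Emit the initializer for a variable with static storage duration (or, for
/// OpenCL C++, static local storage), using scalar, complex, or aggregate
/// emission as appropriate for the variable's type.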
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME:  __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
          CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = llvm::ConstantExpr::getBitCast(
          Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
    }
  // Otherwise, the standard logic requires a helper function.
  } else {
    Func = CodeGenFunction(CGM)
           .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                  CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

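/// Emit a call to the llvm.invariant.start intrinsic for the given address and
/// object size. This is a no-op when not optimizing.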
void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {Int8PtrTy};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
                           llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, Args);
}

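/// Emit the initializer for the given global variable and either mark it
/// invariant (if its type is constant) or register its destructor. Reference
/// variables simply have their initializer bound and stored.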
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
                    VD.getLocation(), VD.getInit()->getExprLoc());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

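/// Register an already-created destructor stub by emitting a call to the C
/// atexit runtime function.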
void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to atexit has a wrong type.");

  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

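/// Unregister a destructor stub using the unatexit runtime function and return
/// the value produced by the call.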
llvm::Value *
CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // it is removed from the list of functions that are called at normal program
  // termination and unatexit returns 0; otherwise, a non-zero value is
  // returned.
  //
  // extern "C" int unatexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to unatexit has a wrong type.");

  llvm::FunctionType *unatexitTy =
      llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);

  llvm::FunctionCallee unatexit =
      CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());

  cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();

  return EmitNounwindRuntimeCall(unatexit, dtorStub);
}

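/// Emit guarded initialization for the given variable by delegating to the
/// target C++ ABI, diagnosing the case where guard variables are forbidden.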
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

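/// Emit a conditional branch to InitBlock or NoInitBlock based on NeedsInit,
/// attaching branch-weight metadata that reflects how rarely the initializer
/// is expected to run.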
void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    //   1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

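/// Create an internal-linkage function suitable for holding global
/// initialization or cleanup code, setting the static-init section, runtime
/// calling convention, and any applicable sanitizer attributes.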
llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS) {
  llvm::Function *Fn = llvm::Function::Create(
      FTy, llvm::GlobalValue::InternalLinkage, Name, &getModule());

  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInNoSanitizeList(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInNoSanitizeList(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
      !isInNoSanitizeList(SanitizerKind::MemTag, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInNoSanitizeList(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInNoSanitizeList(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInNoSanitizeList(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable.  The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

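/// Create the dynamic initializer function for the given global variable and
/// arrange for it to be run: via llvm.global_ctors, an init_seg section, the
/// thread_local init list, or the delayed-initialization list, as appropriate.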
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  if (getLangOpts().OpenMP &&
      getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
                                          PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized.  On most platforms, this is a
    // minor startup time optimization.  In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    AddGlobalCtor(Fn, 65535, COMDATKey);
    if (getTarget().getCXXABI().isMicrosoft() && COMDATKey) {
      // In the MS C++ ABI, MSVC keeps template static data members alive via a
      // linker directive, so mark the COMDAT key as used to match.
      addUsedGlobal(COMDATKey);
    }
  } else if (D->hasAttr<SelectAnyAttr>()) {
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

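/// Hand the collected thread_local initializers to the C++ ABI and clear the
/// per-module bookkeeping.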
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

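/// Compute a sanitized spelling of the module's file name, suitable for use in
/// the name of the global initialization function.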
static SmallString<128> getTransformedFileName(llvm::Module &M) {
  SmallString<128> FileName = llvm::sys::path::filename(M.getName());

  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  return FileName;
}

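/// Render the given priority as a six-digit, zero-padded string so that
/// generated function names sort in priority order.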
static std::string getPrioritySuffix(unsigned int Priority) {
  assert(Priority <= 65535 && "Priority should always be <= 65535.");

  // Compute the function suffix from priority. Prepend with zeroes to make
  // sure the function names are also ordered as priorities.
  std::string PrioritySuffix = llvm::utostr(Priority);
  PrioritySuffix = std::string(6 - PrioritySuffix.size(), '0') + PrioritySuffix;

  return PrioritySuffix;
}

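/// Emit the functions that run all pending global initializers: one function
/// per priority "chunk" for prioritized initializers, followed by a single
/// _GLOBAL__sub_I_<file> function for the rest.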
void
CodeGenModule::EmitCXXGlobalInitFunc() {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Everything is sorted first by priority
    // and second by lexical order, so the ctor functions are emitted in the
    // proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData >::iterator
        PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  if (getCXXABI().useSinitAndSterm() && CXXGlobalInits.empty())
    return;

  // Include the filename in the symbol name. Including "sub_" matches gcc
  // and makes sure these symbols appear lexicographically behind the symbols
  // with priority emitted above.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
      FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);

  // In OpenCL, global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // The current initialization function relies on function-pointer callbacks,
  // which we cannot support, especially between host and device. However,
  // global destruction has little meaning without dynamic resource allocation
  // on the device, and program-scope variables are destroyed by the runtime
  // when the program is released.
  if (getLangOpts().OpenCL) {
    GenOpenCLArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  CXXGlobalInits.clear();
}

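/// Emit the functions that run all registered global destructors and sterm
/// finalizers: one function per priority "chunk" for prioritized finalizers,
/// followed by a single _GLOBAL__D_a function for the rest.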
void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
  if (CXXGlobalDtorsOrStermFinalizers.empty() &&
      PrioritizedCXXStermFinalizers.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized cleanup function.
  if (!PrioritizedCXXStermFinalizers.empty()) {
    SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
    llvm::array_pod_sort(PrioritizedCXXStermFinalizers.begin(),
                         PrioritizedCXXStermFinalizers.end());
    // Iterate over "chunks" of dtors with the same priority and emit each
    // chunk into a separate function. Everything is sorted first by priority
    // and second by lexical order, so the dtor functions are emitted in the
    // proper order.
    for (SmallVectorImpl<StermFinalizerData>::iterator
             I = PrioritizedCXXStermFinalizers.begin(),
             E = PrioritizedCXXStermFinalizers.end();
         I != E;) {
      SmallVectorImpl<StermFinalizerData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, StermFinalizerPriorityCmp());

      LocalCXXStermFinalizers.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);

      for (; I < PrioE; ++I) {
        llvm::FunctionCallee DtorFn = I->second;
        LocalCXXStermFinalizers.emplace_back(DtorFn.getFunctionType(),
                                             DtorFn.getCallee(), nullptr);
      }

      CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
          Fn, LocalCXXStermFinalizers);
      AddGlobalDtor(Fn, Priority);
    }
    PrioritizedCXXStermFinalizers.clear();
  }

  if (CXXGlobalDtorsOrStermFinalizers.empty())
    return;

  // Create our global cleanup function.
  llvm::Function *Fn =
      CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
      Fn, CXXGlobalDtorsOrStermFinalizers);
  AddGlobalDtor(Fn);
  CXXGlobalDtorsOrStermFinalizers.clear();
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                 llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

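/// Emit the body of a function that runs the given list of initializers,
/// optionally guarded by a TLS guard variable.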
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

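/// Emit the body of a function that calls the given destructors or sterm
/// finalizers in reverse order of registration.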
void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
    llvm::Function *Fn,
    ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                        llvm::Constant *>>
        DtorsOrStermFinalizers) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the cleanups, in reverse order from construction.
    for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];

      llvm::CallInst *CI = nullptr;
      if (Arg == nullptr) {
        assert(
            CGM.getCXXABI().useSinitAndSterm() &&
            "Arg could not be nullptr unless using sinit and sterm functions.");
        CI = Builder.CreateCall(CalleeTy, Callee);
      } else
        CI = Builder.CreateCall(CalleeTy, Callee, Arg);

      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object.  The address of the object
/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamDecl::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getBeginLoc();

  StartFunction(GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor),
                getContext().VoidTy, fn, FI, args);
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}