//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME: __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
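  // (For example, under the ARM C++ ABI destructors return "this", which does
  // not match the void(*)(void *) callback parameter that __cxa_atexit
  // expects; that is what the HasThisReturn check below is about.)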
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
          CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = llvm::ConstantExpr::getBitCast(
          Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
    }
    // Otherwise, the standard logic requires a helper function.
  } else {
    Addr = Addr.getElementBitCast(CGF.ConvertTypeForMem(Type));
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                      CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {Int8PtrTy};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
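  // The emitted IR is roughly:
  //   call {}* @llvm.invariant.start(i64 <size in bytes>, i8* <object address>)
  // (in the actual output the intrinsic name carries an address-space overload
  // suffix, e.g. ".p0").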
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
                           llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, Args);
}

void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::GlobalVariable *GV,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getTypes().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = GV->getAddressSpace();
  llvm::Constant *DeclPtr = GV;
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::PointerType *PTy = llvm::PointerType::getWithSamePointeeType(
        GV->getType(), ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(
      DeclPtr, GV->getValueType(), getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
                        "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
                    VD.getLocation(), VD.getInit()->getExprLoc());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Create a stub function, suitable for being passed to __pt_atexit_np,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createTLSAtExitStub(
    const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr,
    llvm::FunctionCallee &AtExit) {
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&D, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
      getContext().IntTy, /*instanceMethod=*/false, /*chainCall=*/false,
      {getContext().IntTy}, FunctionType::ExtInfo(), {}, RequiredArgs::All);

  // Get the stub function type, int(*)(int,...).
  llvm::FunctionType *StubTy =
      llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy}, true);

  llvm::Function *DtorStub = CGM.CreateGlobalInitOrCleanUpFunction(
      StubTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  FunctionArgList Args;
  ImplicitParamDecl IPD(CGM.getContext(), CGM.getContext().IntTy,
                        ImplicitParamDecl::Other);
  Args.push_back(&IPD);
  QualType ResTy = CGM.getContext().IntTy;

  CGF.StartFunction(GlobalDecl(&D, DynamicInitKind::AtExit), ResTy, DtorStub,
                    FI, Args, D.getLocation(), D.getInit()->getExprLoc());

  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(Dtor, Addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *DtorFn = dyn_cast<llvm::Function>(
          Dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(DtorFn->getCallingConv());

  // Return 0 from the stub function.
  CGF.Builder.CreateStore(llvm::Constant::getNullValue(CGM.IntTy),
                          CGF.ReturnValue);

  CGF.FinishFunction();

  return DtorStub;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to atexit has a wrong type.");

  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

llvm::Value *
CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // it is removed from the list of functions that are called at normal program
  // termination and unatexit returns 0; otherwise, a non-zero value is
  // returned.
  //
  // extern "C" int unatexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to unatexit has a wrong type.");

  llvm::FunctionType *unatexitTy =
      llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);

  llvm::FunctionCallee unatexit =
      CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());

  cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();

  return EmitNounwindRuntimeCall(unatexit, dtorStub);
}

void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    // 1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS, llvm::GlobalVariable::LinkageTypes Linkage) {
  llvm::Function *Fn = llvm::Function::Create(FTy, Linkage, Name, &getModule());

  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  if (Linkage == llvm::GlobalVariable::InternalLinkage)
    SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInNoSanitizeList(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInNoSanitizeList(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemtagStack) &&
      !isInNoSanitizeList(SanitizerKind::MemtagStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInNoSanitizeList(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInNoSanitizeList(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInNoSanitizeList(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in the CUDA 7.5 Programming Guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope that are
  // of class type cannot have a non-empty constructor. All the checks have
  // been done in Sema by now. Whatever initializers are allowed are empty and
  // we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  if (getLangOpts().OpenMP &&
      getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    // Contract with backend that "init_seg(compiler)" corresponds to priority
    // 200 and "init_seg(lib)" corresponds to priority 400.
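    // (In MSVC terms, "#pragma init_seg(compiler)" places initializers in
    // section ".CRT$XCC" and "#pragma init_seg(lib)" in ".CRT$XCL", which is
    // what the section checks below key off of.)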
    int Priority = -1;
    if (ISA->getSection() == ".CRT$XCC")
      Priority = 200;
    else if (ISA->getSection() == ".CRT$XCL")
      Priority = 400;

    if (Priority != -1)
      AddGlobalCtor(Fn, Priority, ~0U, COMDATKey);
    else
      EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
                                          PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR ||
             D->hasAttr<SelectAnyAttr>()) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    //
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    I = DelayedCXXInitPosition.find(D);
    // CXXGlobalInits.size() is the lex order number for the next deferred
    // VarDecl. Use it when the current VarDecl is non-deferred. Although this
    // lex order number is shared between current VarDecl and some following
    // VarDecls, their order of insertion into `llvm.global_ctors` is the same
    // as the lexing order and the following stable sort would preserve such
    // order.
    unsigned LexOrder =
        I == DelayedCXXInitPosition.end() ? CXXGlobalInits.size() : I->second;
    AddGlobalCtor(Fn, 65535, LexOrder, COMDATKey);
    if (COMDATKey && (getTriple().isOSBinFormatELF() ||
                      getTarget().getCXXABI().isMicrosoft())) {
      // When COMDAT is used on ELF or in the MS C++ ABI, the key must be in
      // llvm.used to prevent linker GC.
      addUsedGlobal(COMDATKey);
    }

    // If we used a COMDAT key for the global ctor, the init function can be
    // discarded if the global ctor entry is discarded.
    // FIXME: Do we need to restrict this to ELF and Wasm?
    llvm::Comdat *C = Addr->getComdat();
    if (COMDATKey && C &&
        (getTarget().getTriple().isOSBinFormatELF() ||
         getTarget().getTriple().isOSBinFormatWasm())) {
      Fn->setComdat(C);
    }
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

/* Build the initializer for a C++20 module:
   This is arranged to be run only once regardless of how many times the module
   might be imported transitively. This is arranged by using a guard variable.

   If there are no initializers at all (and also no imported modules) we reduce
   this to an empty function (since the Itanium ABI requires that this function
   be available to a caller, which might be produced by a different
   implementation).

   First we call any initializers for imported modules.
   We then call initializers for the Global Module Fragment (if present).
   We then call initializers for the current module.
   We then call initializers for the Private Module Fragment (if present).
*/

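// A rough sketch (pseudo-code, names illustrative) of the emitted initializer:
//
//   void <mangled initializer for this module>() {
//     if (guard variable already set)   // guard omitted when there is
//       return;                         // nothing to initialize
//     set guard variable;
//     call initializers of imported modules;
//     call prioritized, then ordinary, dynamic initializers of this module;
//   }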
void CodeGenModule::EmitCXXModuleInitFunc(Module *Primary) {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  // As noted above, we create the function, even if it is empty.
  // Module initializers for imported modules are emitted first.

  // Collect all the modules that we import
  SmallVector<Module *> AllImports;
  // Ones that we export
  for (auto I : Primary->Exports)
    AllImports.push_back(I.getPointer());
  // Ones that we only import.
  for (Module *M : Primary->Imports)
    AllImports.push_back(M);

  SmallVector<llvm::Function *, 8> ModuleInits;
  for (Module *M : AllImports) {
    // No Itanium initializer in header like modules.
    if (M->isHeaderLikeModule())
      continue; // TODO: warn of mixed use of module map modules and C++20?
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
    SmallString<256> FnName;
    {
      llvm::raw_svector_ostream Out(FnName);
      cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
          .mangleModuleInitializer(M, Out);
    }
    assert(!GetGlobalValue(FnName.str()) &&
           "We should only have one use of the initializer call");
    llvm::Function *Fn = llvm::Function::Create(
        FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
    ModuleInits.push_back(Fn);
  }

  // Add any initializers with specified priority; this uses the same approach
  // as EmitCXXGlobalInitFunc().
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    for (SmallVectorImpl<GlobalInitData>::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end();
         I != E;) {
      SmallVectorImpl<GlobalInitData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      for (; I < PrioE; ++I)
        ModuleInits.push_back(I->second);
    }
  }

  // Now append the ones without specified priority.
  for (auto *F : CXXGlobalInits)
    ModuleInits.push_back(F);

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // We now build the initializer for this module, which has a mangled name
  // as per the Itanium ABI. The action of the initializer is guarded so that
  // each init is run just once (even though a module might be imported
  // multiple times via nested use).
  llvm::Function *Fn;
  {
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
        .mangleModuleInitializer(Primary, Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
        llvm::GlobalVariable::ExternalLinkage);

    // If we have a completely empty initializer then we do not want to create
    // the guard variable.
    ConstantAddress GuardAddr = ConstantAddress::invalid();
    if (!AllImports.empty() || !PrioritizedCXXGlobalInits.empty() ||
        !CXXGlobalInits.empty()) {
      // Create the guard var.
      llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
          getModule(), Int8Ty, /*isConstant=*/false,
          llvm::GlobalVariable::InternalLinkage,
          llvm::ConstantInt::get(Int8Ty, 0), InitFnName.str() + "__in_chrg");
      CharUnits GuardAlign = CharUnits::One();
      Guard->setAlignment(GuardAlign.getAsAlign());
      GuardAddr = ConstantAddress(Guard, Int8Ty, GuardAlign);
    }
    CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits,
                                                     GuardAddr);
  }

  // We allow for the case that a module object is added to a linked binary
  // without a specific call to the initializer. This also ensures that
  // implementation partition initializers are called when the partition
  // is not imported as an interface.
  AddGlobalCtor(Fn);

  // See the comment in EmitCXXGlobalInitFunc about OpenCL global init
  // functions.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  // We are done with the inits.
  AllImports.clear();
  PrioritizedCXXGlobalInits.clear();
  CXXGlobalInits.clear();
  ModuleInits.clear();
}

static SmallString<128> getTransformedFileName(llvm::Module &M) {
  SmallString<128> FileName = llvm::sys::path::filename(M.getName());

  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This happens to
    // be the set of characters accepted by isPreprocessingNumberBody().
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  return FileName;
}

static std::string getPrioritySuffix(unsigned int Priority) {
  assert(Priority <= 65535 && "Priority should always be <= 65535.");

  // Compute the function suffix from priority. Prepend with zeroes to make
  // sure the function names are also ordered as priorities.
  std::string PrioritySuffix = llvm::utostr(Priority);
  PrioritySuffix = std::string(6 - PrioritySuffix.size(), '0') + PrioritySuffix;
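  // For example, priority 200 becomes "000200", so "_GLOBAL__I_000200" sorts
  // before "_GLOBAL__I_000400".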

  return PrioritySuffix;
}

void
CodeGenModule::EmitCXXGlobalInitFunc() {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  // When we import C++20 modules, we must run their initializers first.
  SmallVector<llvm::Function *, 8> ModuleInits;
  if (CXX20ModuleInits)
    for (Module *M : ImportedModules) {
      // No Itanium initializer in header like modules.
      if (M->isHeaderLikeModule())
        continue;
      llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
      SmallString<256> FnName;
      {
        llvm::raw_svector_ostream Out(FnName);
        cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
            .mangleModuleInitializer(M, Out);
      }
      assert(!GetGlobalValue(FnName.str()) &&
             "We should only have one use of the initializer call");
      llvm::Function *Fn = llvm::Function::Create(
          FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
      ModuleInits.push_back(Fn);
    }

  if (ModuleInits.empty() && CXXGlobalInits.empty() &&
      PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Note that everything is sorted first by
    // priority, second by lex order, so we emit ctor functions in the proper
    // order.
    for (SmallVectorImpl<GlobalInitData>::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end();
         I != E;) {
      SmallVectorImpl<GlobalInitData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);

      // Prepend the module inits to the highest priority set.
      if (!ModuleInits.empty()) {
        for (auto *F : ModuleInits)
          LocalCXXGlobalInits.push_back(F);
        ModuleInits.clear();
      }

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  if (getCXXABI().useSinitAndSterm() && ModuleInits.empty() &&
      CXXGlobalInits.empty())
    return;

  for (auto *F : CXXGlobalInits)
    ModuleInits.push_back(F);
  CXXGlobalInits.clear();

  // Include the filename in the symbol name. Including "sub_" matches gcc
  // and makes sure these symbols appear lexicographically behind the symbols
  // with priority emitted above.
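  // For example, a translation unit named "foo.cpp" typically produces
  // "_GLOBAL__sub_I_foo.cpp".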
  llvm::Function *Fn;
  if (CXX20ModuleInits && getContext().getModuleForCodeGen()) {
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
        .mangleModuleInitializer(getContext().getModuleForCodeGen(), Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
        llvm::GlobalVariable::ExternalLinkage);
  } else
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy,
        llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
        FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits);
  AddGlobalCtor(Fn);

  // In OpenCL, global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // The current initialization function makes use of function pointer
  // callbacks; we can't support function pointers, especially between host
  // and device. However, it seems global destruction has little meaning
  // without any dynamic resource allocation on the device, and program-scope
  // variables are destroyed by the runtime when the program is released.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  ModuleInits.clear();
}

void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
  if (CXXGlobalDtorsOrStermFinalizers.empty() &&
      PrioritizedCXXStermFinalizers.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized cleanup function.
  if (!PrioritizedCXXStermFinalizers.empty()) {
    SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
    llvm::array_pod_sort(PrioritizedCXXStermFinalizers.begin(),
                         PrioritizedCXXStermFinalizers.end());
    // Iterate over "chunks" of dtors with the same priority and emit each
    // chunk into a separate function. Note that everything is sorted first by
    // priority, second by lex order, so we emit dtor functions in the proper
    // order.
    for (SmallVectorImpl<StermFinalizerData>::iterator
             I = PrioritizedCXXStermFinalizers.begin(),
             E = PrioritizedCXXStermFinalizers.end();
         I != E;) {
      SmallVectorImpl<StermFinalizerData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, StermFinalizerPriorityCmp());

      LocalCXXStermFinalizers.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);

      for (; I < PrioE; ++I) {
        llvm::FunctionCallee DtorFn = I->second;
        LocalCXXStermFinalizers.emplace_back(DtorFn.getFunctionType(),
                                             DtorFn.getCallee(), nullptr);
      }

      CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
          Fn, LocalCXXStermFinalizers);
      AddGlobalDtor(Fn, Priority);
    }
    PrioritizedCXXStermFinalizers.clear();
  }

  if (CXXGlobalDtorsOrStermFinalizers.empty())
    return;

  // Create our global cleanup function.
  llvm::Function *Fn =
      CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
      Fn, CXXGlobalDtorsOrStermFinalizers);
  AddGlobalDtor(Fn);
  CXXGlobalDtorsOrStermFinalizers.clear();
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                       llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  if (getLangOpts().HLSL)
    CGM.getHLSLRuntime().annotateHLSLResource(D, Addr);

  FinishFunction();
}

void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(), 1),
                          Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
    llvm::Function *Fn,
    ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                        llvm::Constant *>>
        DtorsOrStermFinalizers) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the cleanups, in reverse order from construction.
    for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];

      llvm::CallInst *CI = nullptr;
      if (Arg == nullptr) {
        assert(
            CGM.getCXXABI().useSinitAndSterm() &&
            "Arg could not be nullptr unless using sinit and sterm functions.");
        CI = Builder.CreateCall(CalleeTy, Callee);
      } else
        CI = Builder.CreateCall(CalleeTy, Callee, Arg);

      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamDecl::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getBeginLoc();

  StartFunction(GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor),
                getContext().VoidTy, fn, FI, args);
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}