//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-module state used while generating code.
//
//===----------------------------------------------------------------------===//
12
13 #include "CodeGenModule.h"
14 #include "CGBlocks.h"
15 #include "CGCUDARuntime.h"
16 #include "CGCXXABI.h"
17 #include "CGCall.h"
18 #include "CGDebugInfo.h"
19 #include "CGObjCRuntime.h"
20 #include "CGOpenCLRuntime.h"
21 #include "CGOpenMPRuntime.h"
22 #include "CGOpenMPRuntimeAMDGCN.h"
23 #include "CGOpenMPRuntimeNVPTX.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenPGO.h"
26 #include "ConstantEmitter.h"
27 #include "CoverageMappingGen.h"
28 #include "TargetInfo.h"
29 #include "clang/AST/ASTContext.h"
30 #include "clang/AST/CharUnits.h"
31 #include "clang/AST/DeclCXX.h"
32 #include "clang/AST/DeclObjC.h"
33 #include "clang/AST/DeclTemplate.h"
34 #include "clang/AST/Mangle.h"
35 #include "clang/AST/RecordLayout.h"
36 #include "clang/AST/RecursiveASTVisitor.h"
37 #include "clang/AST/StmtVisitor.h"
38 #include "clang/Basic/Builtins.h"
39 #include "clang/Basic/CharInfo.h"
40 #include "clang/Basic/CodeGenOptions.h"
41 #include "clang/Basic/Diagnostic.h"
42 #include "clang/Basic/FileManager.h"
43 #include "clang/Basic/Module.h"
44 #include "clang/Basic/SourceManager.h"
45 #include "clang/Basic/TargetInfo.h"
46 #include "clang/Basic/Version.h"
47 #include "clang/CodeGen/ConstantInitBuilder.h"
48 #include "clang/Frontend/FrontendDiagnostic.h"
49 #include "llvm/ADT/StringSwitch.h"
50 #include "llvm/ADT/Triple.h"
51 #include "llvm/Analysis/TargetLibraryInfo.h"
52 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
53 #include "llvm/IR/CallingConv.h"
54 #include "llvm/IR/DataLayout.h"
55 #include "llvm/IR/Intrinsics.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Module.h"
58 #include "llvm/IR/ProfileSummary.h"
59 #include "llvm/ProfileData/InstrProfReader.h"
60 #include "llvm/Support/CodeGen.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/ConvertUTF.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/MD5.h"
65 #include "llvm/Support/TimeProfiler.h"
66
67 using namespace clang;
68 using namespace CodeGen;
69
// Hidden, experimental command-line knob: when set, emit a reduced coverage
// mapping (smaller output at the cost of mapping fidelity).
static llvm::cl::opt<bool> LimitedCoverage(
    "limited-coverage-experimental", llvm::cl::ZeroOrMore, llvm::cl::Hidden,
    llvm::cl::desc("Emit limited coverage mapping information (experimental)"),
    llvm::cl::init(false));

// Section into which global annotations are emitted (see llvm.metadata docs).
static const char AnnotationSection[] = "llvm.metadata";
76
createCXXABI(CodeGenModule & CGM)77 static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
78 switch (CGM.getTarget().getCXXABI().getKind()) {
79 case TargetCXXABI::Fuchsia:
80 case TargetCXXABI::GenericAArch64:
81 case TargetCXXABI::GenericARM:
82 case TargetCXXABI::iOS:
83 case TargetCXXABI::iOS64:
84 case TargetCXXABI::WatchOS:
85 case TargetCXXABI::GenericMIPS:
86 case TargetCXXABI::GenericItanium:
87 case TargetCXXABI::WebAssembly:
88 case TargetCXXABI::XL:
89 return CreateItaniumCXXABI(CGM);
90 case TargetCXXABI::Microsoft:
91 return CreateMicrosoftCXXABI(CGM);
92 }
93
94 llvm_unreachable("invalid C++ ABI kind");
95 }
96
CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
                             const PreprocessorOptions &PPO,
                             const CodeGenOptions &CGO, llvm::Module &M,
                             DiagnosticsEngine &diags,
                             CoverageSourceInfo *CoverageInfo)
    : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
      PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
      Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
      VMContext(M.getContext()), Types(*this), VTables(*this),
      SanitizerMD(new SanitizerMetadata(*this)) {

  // Initialize the type cache.
  llvm::LLVMContext &LLVMContext = M.getContext();
  VoidTy = llvm::Type::getVoidTy(LLVMContext);
  Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
  Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
  HalfTy = llvm::Type::getHalfTy(LLVMContext);
  BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
  FloatTy = llvm::Type::getFloatTy(LLVMContext);
  DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
  // Cache frequently queried target layout facts (pointer/int widths and
  // alignments) so the rest of codegen does not requery TargetInfo.
  PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
  PointerAlignInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
  // Note: size_t is sized by the *maximum* pointer width of the target.
  SizeSizeInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
  IntAlignInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
  IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
  IntPtrTy = llvm::IntegerType::get(LLVMContext,
                                    C.getTargetInfo().getMaxPointerWidth());
  Int8PtrTy = Int8Ty->getPointerTo(0);
  Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
  // Allocas may live in a non-default address space on some targets, per the
  // module's data layout.
  AllocaInt8PtrTy = Int8Ty->getPointerTo(
      M.getDataLayout().getAllocaAddrSpace());
  ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();

  RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();

  // Construct only the language runtimes the translation unit needs.
  if (LangOpts.ObjC)
    createObjCRuntime();
  if (LangOpts.OpenCL)
    createOpenCLRuntime();
  if (LangOpts.OpenMP)
    createOpenMPRuntime();
  if (LangOpts.CUDA)
    createCUDARuntime();

  // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
  if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
      (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
    TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
                               getCXXABI().getMangleContext()));

  // If debug info or coverage generation is enabled, create the CGDebugInfo
  // object.
  if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
      CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
    DebugInfo.reset(new CGDebugInfo(*this));

  Block.GlobalUniqueCount = 0;

  if (C.getLangOpts().ObjC)
    ObjCData.reset(new ObjCEntrypoints());

  // Open the instrumentation profile for PGO use.  A failure to read the
  // profile is reported as a diagnostic but does not abort construction.
  if (CodeGenOpts.hasProfileClangUse()) {
    auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
        CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
    if (auto E = ReaderOrErr.takeError()) {
      unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                              "Could not read profile %0: %1");
      llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
        getDiags().Report(DiagID) << CodeGenOpts.ProfileInstrumentUsePath
                                  << EI.message();
      });
    } else
      PGOReader = std::move(ReaderOrErr.get());
  }

  // If coverage mapping generation is enabled, create the
  // CoverageMappingModuleGen object.  CoverageInfo is only dereferenced here,
  // so callers may pass null when coverage mapping is disabled.
  if (CodeGenOpts.CoverageMapping)
    CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
}
182
~CodeGenModule()183 CodeGenModule::~CodeGenModule() {}
184
createObjCRuntime()185 void CodeGenModule::createObjCRuntime() {
186 // This is just isGNUFamily(), but we want to force implementors of
187 // new ABIs to decide how best to do this.
188 switch (LangOpts.ObjCRuntime.getKind()) {
189 case ObjCRuntime::GNUstep:
190 case ObjCRuntime::GCC:
191 case ObjCRuntime::ObjFW:
192 ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
193 return;
194
195 case ObjCRuntime::FragileMacOSX:
196 case ObjCRuntime::MacOSX:
197 case ObjCRuntime::iOS:
198 case ObjCRuntime::WatchOS:
199 ObjCRuntime.reset(CreateMacObjCRuntime(*this));
200 return;
201 }
202 llvm_unreachable("bad runtime kind");
203 }
204
createOpenCLRuntime()205 void CodeGenModule::createOpenCLRuntime() {
206 OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
207 }
208
createOpenMPRuntime()209 void CodeGenModule::createOpenMPRuntime() {
210 // Select a specialized code generation class based on the target, if any.
211 // If it does not exist use the default implementation.
212 switch (getTriple().getArch()) {
213 case llvm::Triple::nvptx:
214 case llvm::Triple::nvptx64:
215 assert(getLangOpts().OpenMPIsDevice &&
216 "OpenMP NVPTX is only prepared to deal with device code.");
217 OpenMPRuntime.reset(new CGOpenMPRuntimeNVPTX(*this));
218 break;
219 case llvm::Triple::amdgcn:
220 assert(getLangOpts().OpenMPIsDevice &&
221 "OpenMP AMDGCN is only prepared to deal with device code.");
222 OpenMPRuntime.reset(new CGOpenMPRuntimeAMDGCN(*this));
223 break;
224 default:
225 if (LangOpts.OpenMPSimd)
226 OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
227 else
228 OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
229 break;
230 }
231 }
232
createCUDARuntime()233 void CodeGenModule::createCUDARuntime() {
234 CUDARuntime.reset(CreateNVCUDARuntime(*this));
235 }
236
// Record that the global named `Name` should later be replaced by `C`
// (applied in applyReplacements()).  A second call with the same name
// overwrites the earlier entry.
void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
  Replacements[Name] = C;
}
240
// Apply the function replacements registered via addReplacement().  Each
// entry maps a mangled name to the constant that stands in for the
// previously-emitted function of that name.
void CodeGenModule::applyReplacements() {
  for (auto &I : Replacements) {
    StringRef MangledName = I.first();
    llvm::Constant *Replacement = I.second;
    // Nothing to do if no global with this name was ever emitted.
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;
    auto *OldF = cast<llvm::Function>(Entry);
    auto *NewF = dyn_cast<llvm::Function>(Replacement);
    if (!NewF) {
      // The replacement may be an alias to a function, or a bitcast/GEP of
      // one; dig out the underlying function when possible so it can be
      // repositioned below.
      if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
        NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
      } else {
        auto *CE = cast<llvm::ConstantExpr>(Replacement);
        assert(CE->getOpcode() == llvm::Instruction::BitCast ||
               CE->getOpcode() == llvm::Instruction::GetElementPtr);
        NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
      }
    }

    // Replace old with new, but keep the old order.
    OldF->replaceAllUsesWith(Replacement);
    if (NewF) {
      NewF->removeFromParent();
      OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
                                                       NewF);
    }
    OldF->eraseFromParent();
  }
}
271
addGlobalValReplacement(llvm::GlobalValue * GV,llvm::Constant * C)272 void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
273 GlobalValReplacements.push_back(std::make_pair(GV, C));
274 }
275
applyGlobalValReplacements()276 void CodeGenModule::applyGlobalValReplacements() {
277 for (auto &I : GlobalValReplacements) {
278 llvm::GlobalValue *GV = I.first;
279 llvm::Constant *C = I.second;
280
281 GV->replaceAllUsesWith(C);
282 GV->eraseFromParent();
283 }
284 }
285
286 // This is only used in aliases that we created and we know they have a
287 // linear structure.
getAliasedGlobal(const llvm::GlobalIndirectSymbol & GIS)288 static const llvm::GlobalObject *getAliasedGlobal(
289 const llvm::GlobalIndirectSymbol &GIS) {
290 llvm::SmallPtrSet<const llvm::GlobalIndirectSymbol*, 4> Visited;
291 const llvm::Constant *C = &GIS;
292 for (;;) {
293 C = C->stripPointerCasts();
294 if (auto *GO = dyn_cast<llvm::GlobalObject>(C))
295 return GO;
296 // stripPointerCasts will not walk over weak aliases.
297 auto *GIS2 = dyn_cast<llvm::GlobalIndirectSymbol>(C);
298 if (!GIS2)
299 return nullptr;
300 if (!Visited.insert(GIS2).second)
301 return nullptr;
302 C = GIS2->getIndirectSymbol();
303 }
304 }
305
void CodeGenModule::checkAliases() {
  // Check if the constructed aliases are well formed. It is really unfortunate
  // that we have to do this in CodeGen, but we only construct mangled names
  // and aliases during codegen.
  bool Error = false;
  DiagnosticsEngine &Diags = getDiags();
  for (const GlobalDecl &GD : Aliases) {
    const auto *D = cast<ValueDecl>(GD.getDecl());
    SourceLocation Location;
    bool IsIFunc = D->hasAttr<IFuncAttr>();
    // Diagnostics point at the alias/ifunc attribute that created the symbol.
    if (const Attr *A = D->getDefiningAttr())
      Location = A->getLocation();
    else
      llvm_unreachable("Not an alias or ifunc?");
    StringRef MangledName = getMangledName(GD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
    // Resolve the alias chain to its underlying object; null means the chain
    // is cyclic.
    const llvm::GlobalValue *GV = getAliasedGlobal(*Alias);
    if (!GV) {
      Error = true;
      Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
    } else if (GV->isDeclaration()) {
      // Aliases must point at a definition in this module.
      Error = true;
      Diags.Report(Location, diag::err_alias_to_undefined)
          << IsIFunc << IsIFunc;
    } else if (IsIFunc) {
      // Check resolver function type.
      llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(
          GV->getType()->getPointerElementType());
      assert(FTy);
      if (!FTy->getReturnType()->isPointerTy())
        Diags.Report(Location, diag::err_ifunc_resolver_return);
    }

    // The immediate aliasee may be wrapped in a constant expression (cast);
    // peel one level to get at the global value itself.
    llvm::Constant *Aliasee = Alias->getIndirectSymbol();
    llvm::GlobalValue *AliaseeGV;
    if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
      AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
    else
      AliaseeGV = cast<llvm::GlobalValue>(Aliasee);

    // Warn when the alias's requested section disagrees with the aliasee's.
    if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
      StringRef AliasSection = SA->getName();
      if (AliasSection != AliaseeGV->getSection())
        Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
            << AliasSection << IsIFunc << IsIFunc;
    }

    // We have to handle alias to weak aliases in here. LLVM itself disallows
    // this since the object semantics would not match the IL one. For
    // compatibility with gcc we implement it by just pointing the alias
    // to its aliasee's aliasee. We also warn, since the user is probably
    // expecting the link to be weak.
    if (auto GA = dyn_cast<llvm::GlobalIndirectSymbol>(AliaseeGV)) {
      if (GA->isInterposable()) {
        Diags.Report(Location, diag::warn_alias_to_weak_alias)
            << GV->getName() << GA->getName() << IsIFunc;
        Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            GA->getIndirectSymbol(), Alias->getType());
        Alias->setIndirectSymbol(Aliasee);
      }
    }
  }
  if (!Error)
    return;

  // On error, remove every alias we created so a malformed module is not
  // handed to the backend.
  for (const GlobalDecl &GD : Aliases) {
    StringRef MangledName = getMangledName(GD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
    Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
    Alias->eraseFromParent();
  }
}
380
clear()381 void CodeGenModule::clear() {
382 DeferredDeclsToEmit.clear();
383 if (OpenMPRuntime)
384 OpenMPRuntime->clear();
385 }
386
reportDiagnostics(DiagnosticsEngine & Diags,StringRef MainFile)387 void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
388 StringRef MainFile) {
389 if (!hasDiagnostics())
390 return;
391 if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
392 if (MainFile.empty())
393 MainFile = "<stdin>";
394 Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
395 } else {
396 if (Mismatched > 0)
397 Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;
398
399 if (Missing > 0)
400 Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
401 }
402 }
403
setVisibilityFromDLLStorageClass(const clang::LangOptions & LO,llvm::Module & M)404 static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
405 llvm::Module &M) {
406 if (!LO.VisibilityFromDLLStorageClass)
407 return;
408
409 llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
410 CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
411 llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
412 CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
413 llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
414 CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
415 llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
416 CodeGenModule::GetLLVMVisibility(
417 LO.getExternDeclNoDLLStorageClassVisibility());
418
419 for (llvm::GlobalValue &GV : M.global_values()) {
420 if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
421 continue;
422
423 if (GV.isDeclarationForLinker()) {
424 GV.setVisibility(GV.getDLLStorageClass() ==
425 llvm::GlobalValue::DLLImportStorageClass
426 ? ExternDeclDLLImportVisibility
427 : ExternDeclNoDLLStorageClassVisibility);
428 } else {
429 GV.setVisibility(GV.getDLLStorageClass() ==
430 llvm::GlobalValue::DLLExportStorageClass
431 ? DLLExportVisibility
432 : NoDLLStorageClassVisibility);
433 }
434
435 GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
436 }
437 }
438
// Finalize the module: flush all deferred emission, run replacement/alias
// fixups, emit ctor/dtor lists, and record module flags and metadata.  The
// ordering below matters: replacements and alias checking must follow
// EmitDeferred(), and DLL-storage-class visibility is applied last so it
// sees the final set of globals.
void CodeGenModule::Release() {
  EmitDeferred();
  EmitVTablesOpportunistically();
  applyGlobalValReplacements();
  applyReplacements();
  checkAliases();
  emitMultiVersionFunctions();
  EmitCXXGlobalInitFunc();
  EmitCXXGlobalCleanUpFunc();
  registerGlobalDtorsWithAtExit();
  EmitCXXThreadLocalInitFunc();
  if (ObjCRuntime)
    if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
      AddGlobalCtor(ObjCInitFunction);
  // CUDA module ctor is host-side only.
  if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
      CUDARuntime) {
    if (llvm::Function *CudaCtorFunction =
            CUDARuntime->makeModuleCtorFunction())
      AddGlobalCtor(CudaCtorFunction);
  }
  if (OpenMPRuntime) {
    if (llvm::Function *OpenMPRequiresDirectiveRegFun =
            OpenMPRuntime->emitRequiresDirectiveRegFun()) {
      AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
    }
    OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
    OpenMPRuntime->clear();
  }
  if (PGOReader) {
    // Attach the profile summary and report any profile-matching issues.
    getModule().setProfileSummary(
        PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
        llvm::ProfileSummary::PSK_Instr);
    if (PGOStats.hasDiagnostics())
      PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
  }
  EmitCtorList(GlobalCtors, "llvm.global_ctors");
  EmitCtorList(GlobalDtors, "llvm.global_dtors");
  EmitGlobalAnnotations();
  EmitStaticExternCAliases();
  EmitDeferredUnusedCoverageMappings();
  if (CoverageMapping)
    CoverageMapping->emit();
  if (CodeGenOpts.SanitizeCfiCrossDso) {
    CodeGenFunction(*this).EmitCfiCheckFail();
    CodeGenFunction(*this).EmitCfiCheckStub();
  }
  emitAtAvailableLinkGuard();
  if (Context.getTargetInfo().getTriple().isWasm() &&
      !Context.getTargetInfo().getTriple().isOSEmscripten()) {
    EmitMainVoidAlias();
  }
  emitLLVMUsed();
  if (SanStats)
    SanStats->finish();

  if (CodeGenOpts.Autolink &&
      (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
    EmitModuleLinkOptions();
  }

  // On ELF we pass the dependent library specifiers directly to the linker
  // without manipulating them. This is in contrast to other platforms where
  // they are mapped to a specific linker option by the compiler. This
  // difference is a result of the greater variety of ELF linkers and the fact
  // that ELF linkers tend to handle libraries in a more complicated fashion
  // than on other platforms. This forces us to defer handling the dependent
  // libs to the linker.
  //
  // CUDA/HIP device and host libraries are different. Currently there is no
  // way to differentiate dependent libraries for host or device. Existing
  // usage of #pragma comment(lib, *) is intended for host libraries on
  // Windows. Therefore emit llvm.dependent-libraries only for host.
  if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
    auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
    for (auto *MD : ELFDependentLibraries)
      NMD->addOperand(MD);
  }

  // Record mregparm value now so it is visible through rest of codegen.
  if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
    getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
                              CodeGenOpts.NumRegisterParameters);

  if (CodeGenOpts.DwarfVersion) {
    getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
                              CodeGenOpts.DwarfVersion);
  }

  if (Context.getLangOpts().SemanticInterposition)
    // Require various optimization to respect semantic interposition.
    getModule().setSemanticInterposition(1);
  else if (Context.getLangOpts().ExplicitNoSemanticInterposition)
    // Allow dso_local on applicable targets.
    getModule().setSemanticInterposition(0);

  if (CodeGenOpts.EmitCodeView) {
    // Indicate that we want CodeView in the metadata.
    getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
  }
  if (CodeGenOpts.CodeViewGHash) {
    getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
  }
  if (CodeGenOpts.ControlFlowGuard) {
    // Function ID tables and checks for Control Flow Guard (cfguard=2).
    getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 2);
  } else if (CodeGenOpts.ControlFlowGuardNoChecks) {
    // Function ID tables for Control Flow Guard (cfguard=1).
    getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
  }
  if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
    // We don't support LTO with 2 with different StrictVTablePointers
    // FIXME: we could support it by stripping all the information introduced
    // by StrictVTablePointers.

    getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers",1);

    llvm::Metadata *Ops[2] = {
        llvm::MDString::get(VMContext, "StrictVTablePointers"),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::Type::getInt32Ty(VMContext), 1))};

    getModule().addModuleFlag(llvm::Module::Require,
                              "StrictVTablePointersRequirement",
                              llvm::MDNode::get(VMContext, Ops));
  }
  if (getModuleDebugInfo())
    // We support a single version in the linked module. The LLVM
    // parser will drop debug info with a different version number
    // (and warn about it, too).
    getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
                              llvm::DEBUG_METADATA_VERSION);

  // We need to record the widths of enums and wchar_t, so that we can generate
  // the correct build attributes in the ARM backend. wchar_size is also used by
  // TargetLibraryInfo.
  uint64_t WCharWidth =
      Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
  getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);

  llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
  if (   Arch == llvm::Triple::arm
      || Arch == llvm::Triple::armeb
      || Arch == llvm::Triple::thumb
      || Arch == llvm::Triple::thumbeb) {
    // The minimum width of an enum in bytes
    uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
    getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
  }

  if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
    StringRef ABIStr = Target.getABI();
    llvm::LLVMContext &Ctx = TheModule.getContext();
    getModule().addModuleFlag(llvm::Module::Error, "target-abi",
                              llvm::MDString::get(Ctx, ABIStr));
  }

  if (CodeGenOpts.SanitizeCfiCrossDso) {
    // Indicate that we want cross-DSO control flow integrity checks.
    getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
  }

  if (CodeGenOpts.WholeProgramVTables) {
    // Indicate whether VFE was enabled for this module, so that the
    // vcall_visibility metadata added under whole program vtables is handled
    // appropriately in the optimizer.
    getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
                              CodeGenOpts.VirtualFunctionElimination);
  }

  if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
    getModule().addModuleFlag(llvm::Module::Override,
                              "CFI Canonical Jump Tables",
                              CodeGenOpts.SanitizeCfiCanonicalJumpTables);
  }

  if (CodeGenOpts.CFProtectionReturn &&
      Target.checkCFProtectionReturnSupported(getDiags())) {
    // Indicate that we want to instrument return control flow protection.
    getModule().addModuleFlag(llvm::Module::Override, "cf-protection-return",
                              1);
  }

  if (CodeGenOpts.CFProtectionBranch &&
      Target.checkCFProtectionBranchSupported(getDiags())) {
    // Indicate that we want to instrument branch control flow protection.
    getModule().addModuleFlag(llvm::Module::Override, "cf-protection-branch",
                              1);
  }

  if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
      Arch == llvm::Triple::aarch64_be) {
    // Record AArch64 pointer-authentication / BTI settings for the backend.
    getModule().addModuleFlag(llvm::Module::Error,
                              "branch-target-enforcement",
                              LangOpts.BranchTargetEnforcement);

    getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
                              LangOpts.hasSignReturnAddress());

    getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
                              LangOpts.isSignReturnAddressScopeAll());

    getModule().addModuleFlag(llvm::Module::Error,
                              "sign-return-address-with-bkey",
                              !LangOpts.isSignReturnAddressWithAKey());
  }

  if (!CodeGenOpts.MemoryProfileOutput.empty()) {
    llvm::LLVMContext &Ctx = TheModule.getContext();
    getModule().addModuleFlag(
        llvm::Module::Error, "MemProfProfileFilename",
        llvm::MDString::get(Ctx, CodeGenOpts.MemoryProfileOutput));
  }

  if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
    // Indicate whether __nvvm_reflect should be configured to flush denormal
    // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
    // property.)
    getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
                              CodeGenOpts.FP32DenormalMode.Output !=
                                  llvm::DenormalMode::IEEE);
  }

  // Emit OpenCL specific module metadata: OpenCL/SPIR version.
  if (LangOpts.OpenCL) {
    EmitOpenCLMetadata();
    // Emit SPIR version.
    if (getTriple().isSPIR()) {
      // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
      // opencl.spir.version named metadata.
      // C++ is backwards compatible with OpenCL v2.0.
      auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
      llvm::Metadata *SPIRVerElts[] = {
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
              Int32Ty, Version / 100)),
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
              Int32Ty, (Version / 100 > 1) ? 0 : 2))};
      llvm::NamedMDNode *SPIRVerMD =
          TheModule.getOrInsertNamedMetadata("opencl.spir.version");
      llvm::LLVMContext &Ctx = TheModule.getContext();
      SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
    }
  }

  if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
    assert(PLevel < 3 && "Invalid PIC Level");
    getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
    if (Context.getLangOpts().PIE)
      getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
  }

  if (getCodeGenOpts().CodeModel.size() > 0) {
    // Map the -mcmodel string onto the LLVM code model; unknown strings are
    // silently ignored here (validated earlier in the driver).
    unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
                      .Case("tiny", llvm::CodeModel::Tiny)
                      .Case("small", llvm::CodeModel::Small)
                      .Case("kernel", llvm::CodeModel::Kernel)
                      .Case("medium", llvm::CodeModel::Medium)
                      .Case("large", llvm::CodeModel::Large)
                      .Default(~0u);
    if (CM != ~0u) {
      llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
      getModule().setCodeModel(codeModel);
    }
  }

  if (CodeGenOpts.NoPLT)
    getModule().setRtLibUseGOT();

  SimplifyPersonality();

  if (getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
    EmitCoverageFile();

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->finalize();

  if (getCodeGenOpts().EmitVersionIdentMetadata)
    EmitVersionIdentMetadata();

  if (!getCodeGenOpts().RecordCommandLine.empty())
    EmitCommandLineMetadata();

  getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);

  EmitBackendOptionsMetadata(getCodeGenOpts());

  // Set visibility from DLL storage class
  // We do this at the end of LLVM IR generation; after any operation
  // that might affect the DLL storage class or the visibility, and
  // before anything that might act on these.
  setVisibilityFromDLLStorageClass(LangOpts, getModule());
}
733
EmitOpenCLMetadata()734 void CodeGenModule::EmitOpenCLMetadata() {
735 // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
736 // opencl.ocl.version named metadata node.
737 // C++ is backwards compatible with OpenCL v2.0.
738 // FIXME: We might need to add CXX version at some point too?
739 auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
740 llvm::Metadata *OCLVerElts[] = {
741 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
742 Int32Ty, Version / 100)),
743 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
744 Int32Ty, (Version % 100) / 10))};
745 llvm::NamedMDNode *OCLVerMD =
746 TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
747 llvm::LLVMContext &Ctx = TheModule.getContext();
748 OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
749 }
750
EmitBackendOptionsMetadata(const CodeGenOptions CodeGenOpts)751 void CodeGenModule::EmitBackendOptionsMetadata(
752 const CodeGenOptions CodeGenOpts) {
753 switch (getTriple().getArch()) {
754 default:
755 break;
756 case llvm::Triple::riscv32:
757 case llvm::Triple::riscv64:
758 getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
759 CodeGenOpts.SmallDataLimit);
760 break;
761 }
762 }
763
// Notify the type cache that the tag type TD has been completed.
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
  // Make sure that this type is translated.
  Types.UpdateCompletedType(TD);
}
768
// Ask the type cache to refresh its entry for the class RD.
void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  // Make sure that this type is translated.
  Types.RefreshTypeCacheForClass(RD);
}
773
getTBAATypeInfo(QualType QTy)774 llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
775 if (!TBAA)
776 return nullptr;
777 return TBAA->getTypeInfo(QTy);
778 }
779
getTBAAAccessInfo(QualType AccessType)780 TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
781 if (!TBAA)
782 return TBAAAccessInfo();
783 if (getLangOpts().CUDAIsDevice) {
784 // As CUDA builtin surface/texture types are replaced, skip generating TBAA
785 // access info.
786 if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
787 if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
788 nullptr)
789 return TBAAAccessInfo();
790 } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
791 if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
792 nullptr)
793 return TBAAAccessInfo();
794 }
795 }
796 return TBAA->getAccessInfo(AccessType);
797 }
798
799 TBAAAccessInfo
getTBAAVTablePtrAccessInfo(llvm::Type * VTablePtrType)800 CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
801 if (!TBAA)
802 return TBAAAccessInfo();
803 return TBAA->getVTablePtrAccessInfo(VTablePtrType);
804 }
805
getTBAAStructInfo(QualType QTy)806 llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
807 if (!TBAA)
808 return nullptr;
809 return TBAA->getTBAAStructInfo(QTy);
810 }
811
getTBAABaseTypeInfo(QualType QTy)812 llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
813 if (!TBAA)
814 return nullptr;
815 return TBAA->getBaseTypeInfo(QTy);
816 }
817
getTBAAAccessTagInfo(TBAAAccessInfo Info)818 llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
819 if (!TBAA)
820 return nullptr;
821 return TBAA->getAccessTagInfo(Info);
822 }
823
mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,TBAAAccessInfo TargetInfo)824 TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
825 TBAAAccessInfo TargetInfo) {
826 if (!TBAA)
827 return TBAAAccessInfo();
828 return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
829 }
830
831 TBAAAccessInfo
mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,TBAAAccessInfo InfoB)832 CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
833 TBAAAccessInfo InfoB) {
834 if (!TBAA)
835 return TBAAAccessInfo();
836 return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
837 }
838
839 TBAAAccessInfo
mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,TBAAAccessInfo SrcInfo)840 CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
841 TBAAAccessInfo SrcInfo) {
842 if (!TBAA)
843 return TBAAAccessInfo();
844 return TBAA->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo);
845 }
846
DecorateInstructionWithTBAA(llvm::Instruction * Inst,TBAAAccessInfo TBAAInfo)847 void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
848 TBAAAccessInfo TBAAInfo) {
849 if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
850 Inst->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
851 }
852
DecorateInstructionWithInvariantGroup(llvm::Instruction * I,const CXXRecordDecl * RD)853 void CodeGenModule::DecorateInstructionWithInvariantGroup(
854 llvm::Instruction *I, const CXXRecordDecl *RD) {
855 I->setMetadata(llvm::LLVMContext::MD_invariant_group,
856 llvm::MDNode::get(getLLVMContext(), {}));
857 }
858
Error(SourceLocation loc,StringRef message)859 void CodeGenModule::Error(SourceLocation loc, StringRef message) {
860 unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
861 getDiags().Report(Context.getFullLoc(loc), diagID) << message;
862 }
863
864 /// ErrorUnsupported - Print out an error that codegen doesn't support the
865 /// specified stmt yet.
ErrorUnsupported(const Stmt * S,const char * Type)866 void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
867 unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
868 "cannot compile this %0 yet");
869 std::string Msg = Type;
870 getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
871 << Msg << S->getSourceRange();
872 }
873
874 /// ErrorUnsupported - Print out an error that codegen doesn't support the
875 /// specified decl yet.
ErrorUnsupported(const Decl * D,const char * Type)876 void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
877 unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
878 "cannot compile this %0 yet");
879 std::string Msg = Type;
880 getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
881 }
882
getSize(CharUnits size)883 llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
884 return llvm::ConstantInt::get(SizeTy, size.getQuantity());
885 }
886
// Apply the source-level visibility of D to GV. Early-outs preserve the
// required precedence: dllimport wins, then local linkage, then the
// declaration's own (or globally requested) visibility.
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
                                        const NamedDecl *D) const {
  // DLL-imported symbols never take an explicit visibility.
  if (GV->hasDLLImportStorageClass())
    return;
  // Internal definitions always have default visibility.
  if (GV->hasLocalLinkage()) {
    GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
    return;
  }
  // No declaration to take a visibility from (e.g. runtime-synthesized
  // globals): leave GV as-is.
  if (!D)
    return;
  // Set visibility for definitions, and for declarations if requested globally
  // or set explicitly.
  LinkageInfo LV = D->getLinkageAndVisibility();
  if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
      !GV->isDeclarationForLinker())
    GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
}
905
// Returns true when it is safe to mark GV dso_local, i.e. when references
// from this module are known to resolve within the same linkage unit. The
// answer depends on linkage, visibility, object file format, and the
// relocation model; the checks below are ordered from format-independent
// to ELF-specific.
static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
                                 llvm::GlobalValue *GV) {
  // Internal symbols cannot be referenced from outside this module.
  if (GV->hasLocalLinkage())
    return true;

  // Non-default visibility prevents preemption (extern_weak is excluded
  // because it may legitimately resolve to a symbol outside this DSO).
  if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
    return true;

  // DLLImport explicitly marks the GV as external.
  if (GV->hasDLLImportStorageClass())
    return false;

  const llvm::Triple &TT = CGM.getTriple();
  if (TT.isWindowsGNUEnvironment()) {
    // In MinGW, variables without DLLImport can still be automatically
    // imported from a DLL by the linker; don't mark variables that
    // potentially could come from another DLL as DSO local.
    if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
        !GV->isThreadLocal())
      return false;
  }

  // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
  // remain unresolved in the link, they can be resolved to zero, which is
  // outside the current DSO.
  if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
    return false;

  // Every other GV is local on COFF.
  // Make an exception for windows OS in the triple: Some firmware builds use
  // *-win32-macho triples. This (accidentally?) produced windows relocations
  // without GOT tables in older clang versions; Keep this behaviour.
  // FIXME: even thread local variables?
  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
    return true;

  // Only handle COFF and ELF for now.
  if (!TT.isOSBinFormatELF())
    return false;

  // If this is not an executable, don't assume anything is local.
  const auto &CGOpts = CGM.getCodeGenOpts();
  llvm::Reloc::Model RM = CGOpts.RelocationModel;
  const auto &LOpts = CGM.getLangOpts();
  if (RM != llvm::Reloc::Static && !LOpts.PIE)
    return false;

  // A definition cannot be preempted from an executable.
  if (!GV->isDeclarationForLinker())
    return true;

  // Most PIC code sequences that assume that a symbol is local cannot produce a
  // 0 if it turns out the symbol is undefined. While this is ABI and relocation
  // depended, it seems worth it to handle it here.
  if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
    return false;

  // PPC has no copy relocations and cannot use a plt entry as a symbol address.
  llvm::Triple::ArchType Arch = TT.getArch();
  if (Arch == llvm::Triple::ppc || Arch == llvm::Triple::ppc64 ||
      Arch == llvm::Triple::ppc64le)
    return false;

  // If we can use copy relocations we can assume it is local.
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
    if (!Var->isThreadLocal() &&
        (RM == llvm::Reloc::Static || CGOpts.PIECopyRelocations))
      return true;

  // If we can use a plt entry as the symbol address we can assume it
  // is local.
  // FIXME: This should work for PIE, but the gold linker doesn't support it.
  if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
    return true;

  // Otherwise don't assume it is local.
  return false;
}
984
setDSOLocal(llvm::GlobalValue * GV) const985 void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
986 GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
987 }
988
setDLLImportDLLExport(llvm::GlobalValue * GV,GlobalDecl GD) const989 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
990 GlobalDecl GD) const {
991 const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
992 // C++ destructors have a few C++ ABI specific special cases.
993 if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
994 getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
995 return;
996 }
997 setDLLImportDLLExport(GV, D);
998 }
999
setDLLImportDLLExport(llvm::GlobalValue * GV,const NamedDecl * D) const1000 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1001 const NamedDecl *D) const {
1002 if (D && D->isExternallyVisible()) {
1003 if (D->hasAttr<DLLImportAttr>())
1004 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
1005 else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
1006 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
1007 }
1008 }
1009
setGVProperties(llvm::GlobalValue * GV,GlobalDecl GD) const1010 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
1011 GlobalDecl GD) const {
1012 setDLLImportDLLExport(GV, GD);
1013 setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
1014 }
1015
setGVProperties(llvm::GlobalValue * GV,const NamedDecl * D) const1016 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
1017 const NamedDecl *D) const {
1018 setDLLImportDLLExport(GV, D);
1019 setGVPropertiesAux(GV, D);
1020 }
1021
setGVPropertiesAux(llvm::GlobalValue * GV,const NamedDecl * D) const1022 void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
1023 const NamedDecl *D) const {
1024 setGlobalVisibility(GV, D);
1025 setDSOLocal(GV);
1026 GV->setPartition(CodeGenOpts.SymbolPartition);
1027 }
1028
// Map a tls_model attribute spelling to the corresponding LLVM TLS mode.
// NOTE: there is deliberately no .Default() case — callers are expected to
// pass one of the four spellings the tls_model attribute accepts
// (presumably enforced before codegen; an unknown string here would be a
// bug upstream).
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
  return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
      .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
      .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
      .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
      .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
}
1036
// Translate the module-wide default TLS model (from -ftls-model) into its
// LLVM equivalent. The switch is exhaustive over CodeGenOptions' enum, so
// falling out of it is unreachable.
llvm::GlobalVariable::ThreadLocalMode
CodeGenModule::GetDefaultLLVMTLSModel() const {
  switch (CodeGenOpts.getDefaultTLSModel()) {
  case CodeGenOptions::GeneralDynamicTLSModel:
    return llvm::GlobalVariable::GeneralDynamicTLSModel;
  case CodeGenOptions::LocalDynamicTLSModel:
    return llvm::GlobalVariable::LocalDynamicTLSModel;
  case CodeGenOptions::InitialExecTLSModel:
    return llvm::GlobalVariable::InitialExecTLSModel;
  case CodeGenOptions::LocalExecTLSModel:
    return llvm::GlobalVariable::LocalExecTLSModel;
  }
  llvm_unreachable("Invalid TLS model!");
}
1051
setTLSMode(llvm::GlobalValue * GV,const VarDecl & D) const1052 void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
1053 assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
1054
1055 llvm::GlobalValue::ThreadLocalMode TLM;
1056 TLM = GetDefaultLLVMTLSModel();
1057
1058 // Override the TLS model if it is explicitly specified.
1059 if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
1060 TLM = GetLLVMTLSModel(Attr->getModel());
1061 }
1062
1063 GV->setThreadLocalMode(TLM);
1064 }
1065
getCPUSpecificMangling(const CodeGenModule & CGM,StringRef Name)1066 static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
1067 StringRef Name) {
1068 const TargetInfo &Target = CGM.getTarget();
1069 return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
1070 }
1071
AppendCPUSpecificCPUDispatchMangling(const CodeGenModule & CGM,const CPUSpecificAttr * Attr,unsigned CPUIndex,raw_ostream & Out)1072 static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
1073 const CPUSpecificAttr *Attr,
1074 unsigned CPUIndex,
1075 raw_ostream &Out) {
1076 // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
1077 // supported.
1078 if (Attr)
1079 Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
1080 else if (CGM.getTarget().supportsIFunc())
1081 Out << ".resolver";
1082 }
1083
// Appends the multiversion mangling suffix for a target("...") attribute:
// '.' followed by "arch_<name>" (if an architecture is named) and the
// attribute's features, sorted by multiversion priority and joined with
// '_'. The default version gets no suffix at all.
static void AppendTargetMangling(const CodeGenModule &CGM,
                                 const TargetAttr *Attr, raw_ostream &Out) {
  if (Attr->isDefaultVersion())
    return;

  Out << '.';
  const TargetInfo &Target = CGM.getTarget();
  // Parse with a comparator so features come out in a deterministic,
  // priority-sorted order — the suffix must be stable across TUs.
  ParsedTargetAttr Info =
      Attr->parse([&Target](StringRef LHS, StringRef RHS) {
        // Multiversioning doesn't allow "no-${feature}", so we can
        // only have "+" prefixes here.
        assert(LHS.startswith("+") && RHS.startswith("+") &&
               "Features should always have a prefix.");
        return Target.multiVersionSortPriority(LHS.substr(1)) >
               Target.multiVersionSortPriority(RHS.substr(1));
      });

  bool IsFirst = true;

  if (!Info.Architecture.empty()) {
    IsFirst = false;
    Out << "arch_" << Info.Architecture;
  }

  for (StringRef Feat : Info.Features) {
    if (!IsFirst)
      Out << '_';
    IsFirst = false;
    // Drop the '+' prefix asserted above.
    Out << Feat.substr(1);
  }
}
1115
// Compute the mangled name for GD/ND: the C++ ABI mangling when required,
// otherwise the plain identifier with special-case prefixes (regcall,
// CUDA kernel stubs), plus the multiversioning suffix unless
// OmitMultiVersionMangling is set (used to find the pre-multiversion name).
static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
                                      const NamedDecl *ND,
                                      bool OmitMultiVersionMangling = false) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  MangleContext &MC = CGM.getCXXABI().getMangleContext();
  if (MC.shouldMangleDeclName(ND))
    MC.mangleName(GD.getWithDecl(ND), Out);
  else {
    // Unmangled path: emit the identifier, with decorations for regcall
    // functions and CUDA kernel stub references.
    IdentifierInfo *II = ND->getIdentifier();
    assert(II && "Attempt to mangle unnamed decl.");
    const auto *FD = dyn_cast<FunctionDecl>(ND);

    if (FD &&
        FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
      Out << "__regcall3__" << II->getName();
    } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
               GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      Out << "__device_stub__" << II->getName();
    } else {
      Out << II->getName();
    }
  }

  // Append the per-version suffix for multiversioned functions.
  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
      switch (FD->getMultiVersionKind()) {
      case MultiVersionKind::CPUDispatch:
      case MultiVersionKind::CPUSpecific:
        AppendCPUSpecificCPUDispatchMangling(CGM,
                                             FD->getAttr<CPUSpecificAttr>(),
                                             GD.getMultiVersionIndex(), Out);
        break;
      case MultiVersionKind::Target:
        AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
        break;
      case MultiVersionKind::None:
        llvm_unreachable("None multiversion type isn't valid here");
      }
    }

  return std::string(Out.str());
}
1159
// When FD becomes multiversioned after a version of it was already emitted
// under its plain (non-suffixed) name, rename the existing entity — both in
// the mangling tables and in the IR module — to its proper versioned name.
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
                                            const FunctionDecl *FD) {
  if (!FD->isMultiVersion())
    return;

  // Get the name of what this would be without the 'target' attribute. This
  // allows us to lookup the version that was emitted when this wasn't a
  // multiversion function.
  std::string NonTargetName =
      getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
  GlobalDecl OtherGD;
  if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
    assert(OtherGD.getCanonicalDecl()
               .getDecl()
               ->getAsFunction()
               ->isMultiVersion() &&
           "Other GD should now be a multiversioned function");
    // OtherFD is the version of this function that was mangled BEFORE
    // becoming a MultiVersion function. It potentially needs to be updated.
    const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
                                      .getDecl()
                                      ->getAsFunction()
                                      ->getMostRecentDecl();
    std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
    // This is so that if the initial version was already the 'default'
    // version, we don't try to update it.
    if (OtherName != NonTargetName) {
      // Remove instead of erase, since others may have stored the StringRef
      // to this.
      const auto ExistingRecord = Manglings.find(NonTargetName);
      if (ExistingRecord != std::end(Manglings))
        Manglings.remove(&(*ExistingRecord));
      // Re-register under the suffixed name and point the canonical decl's
      // cached name at the new table entry.
      auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
      MangledDeclNames[OtherGD.getCanonicalDecl()] = Result.first->first();
      // If IR for the old name was already emitted, rename it in place.
      if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
        Entry->setName(OtherName);
    }
  }
}
1199
// Return (and cache) the mangled name for GD. The returned StringRef points
// into the Manglings table, so it stays valid for the module's lifetime.
StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
  GlobalDecl CanonicalGD = GD.getCanonicalDecl();

  // Some ABIs don't have constructor variants. Make sure that base and
  // complete constructors get mangled the same.
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
    if (!getTarget().getCXXABI().hasConstructorVariants()) {
      CXXCtorType OrigCtorType = GD.getCtorType();
      assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
      if (OrigCtorType == Ctor_Base)
        CanonicalGD = GlobalDecl(CD, Ctor_Complete);
    }
  }

  // Fast path: reuse the cached mangling.
  auto FoundName = MangledDeclNames.find(CanonicalGD);
  if (FoundName != MangledDeclNames.end())
    return FoundName->second;

  // Keep the first result in the case of a mangling collision.
  const auto *ND = cast<NamedDecl>(GD.getDecl());
  std::string MangledName = getMangledNameImpl(*this, GD, ND);

  // Ensure either we have different ABIs between host and device compilations,
  // says host compilation following MSVC ABI but device compilation follows
  // Itanium C++ ABI or, if they follow the same ABI, kernel names after
  // mangling should be the same after name stubbing. The later checking is
  // very important as the device kernel name being mangled in host-compilation
  // is used to resolve the device binaries to be executed. Inconsistent naming
  // result in undefined behavior. Even though we cannot check that naming
  // directly between host- and device-compilations, the host- and
  // device-mangling in host compilation could help catching certain ones.
  assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
         getLangOpts().CUDAIsDevice ||
         (getContext().getAuxTargetInfo() &&
          (getContext().getAuxTargetInfo()->getCXXABI() !=
           getContext().getTargetInfo().getCXXABI())) ||
         getCUDARuntime().getDeviceSideName(ND) ==
             getMangledNameImpl(
                 *this,
                 GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
                 ND));

  // Store the name in the table and return the table-owned string.
  auto Result = Manglings.insert(std::make_pair(MangledName, GD));
  return MangledDeclNames[CanonicalGD] = Result.first->first();
}
1245
// Compute the mangled name of a block literal. The enclosing declaration
// (if any) determines the mangling scheme: ctor/dtor blocks get variant-
// aware manglings, other contexts use the generic block mangling, and a
// null decl means a global block (possibly inside a global initializer).
StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
                                             const BlockDecl *BD) {
  MangleContext &MangleCtx = getCXXABI().getMangleContext();
  const Decl *D = GD.getDecl();

  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  if (!D)
    // initializedGlobalDecl tracks the global whose initializer is being
    // emitted, so the block can be named after it.
    MangleCtx.mangleGlobalBlock(BD,
      dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
  else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
    MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
    MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
  else
    MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);

  // Return the table-owned copy so the StringRef outlives Buffer.
  auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
  return Result.first->first();
}
1266
GetGlobalValue(StringRef Name)1267 llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
1268 return getModule().getNamedValue(Name);
1269 }
1270
1271 /// AddGlobalCtor - Add a function to the list that will be called before
1272 /// main() runs.
AddGlobalCtor(llvm::Function * Ctor,int Priority,llvm::Constant * AssociatedData)1273 void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
1274 llvm::Constant *AssociatedData) {
1275 // FIXME: Type coercion of void()* types.
1276 GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
1277 }
1278
1279 /// AddGlobalDtor - Add a function to the list that will be called
1280 /// when the module is unloaded.
AddGlobalDtor(llvm::Function * Dtor,int Priority)1281 void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) {
1282 if (CodeGenOpts.RegisterGlobalDtorsWithAtExit) {
1283 if (getCXXABI().useSinitAndSterm())
1284 llvm::report_fatal_error(
1285 "register global dtors with atexit() is not supported yet");
1286 DtorsUsingAtExit[Priority].push_back(Dtor);
1287 return;
1288 }
1289
1290 // FIXME: Type coercion of void()* types.
1291 GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
1292 }
1293
// Emit Fns as the llvm.global_ctors/llvm.global_dtors-style appending
// global named GlobalName, as an array of { i32 priority, void()* fn,
// i8* associated-data } entries, then clear Fns.
void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
  if (Fns.empty()) return;

  // Ctor function type is void()*.
  llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
  llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
      TheModule.getDataLayout().getProgramAddressSpace());

  // Get the type of a ctor entry, { i32, void ()*, i8* }.
  llvm::StructType *CtorStructTy = llvm::StructType::get(
      Int32Ty, CtorPFTy, VoidPtrTy);

  // Construct the constructor and destructor arrays.
  ConstantInitBuilder builder(*this);
  auto ctors = builder.beginArray(CtorStructTy);
  for (const auto &I : Fns) {
    auto ctor = ctors.beginStruct(CtorStructTy);
    ctor.addInt(Int32Ty, I.Priority);
    ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
    if (I.AssociatedData)
      ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
    else
      ctor.addNullPointer(VoidPtrTy);
    ctor.finishAndAddTo(ctors);
  }

  auto list =
      ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
                                  /*constant*/ false,
                                  llvm::GlobalValue::AppendingLinkage);

  // The LTO linker doesn't seem to like it when we set an alignment
  // on appending variables. Take it off as a workaround.
  list->setAlignment(llvm::None);

  Fns.clear();
}
1331
// Determine the LLVM linkage for the function GD, applying C++-ABI-specific
// overrides for destructors and for inheriting constructors under the
// Microsoft ABI before falling back to the generic declarator rules.
llvm::GlobalValue::LinkageTypes
CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
  const auto *D = cast<FunctionDecl>(GD.getDecl());

  GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);

  // Destructor variants may share bodies, so the ABI decides their linkage.
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
    return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());

  if (isa<CXXConstructorDecl>(D) &&
      cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
      Context.getTargetInfo().getCXXABI().isMicrosoft()) {
    // Our approach to inheriting constructors is fundamentally different from
    // that used by the MS ABI, so keep our inheriting constructor thunks
    // internal rather than trying to pick an unambiguous mangling for them.
    return llvm::GlobalValue::InternalLinkage;
  }

  return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
}
1352
CreateCrossDsoCfiTypeId(llvm::Metadata * MD)1353 llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
1354 llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
1355 if (!MDS) return nullptr;
1356
1357 return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
1358 }
1359
SetLLVMFunctionAttributes(GlobalDecl GD,const CGFunctionInfo & Info,llvm::Function * F)1360 void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
1361 const CGFunctionInfo &Info,
1362 llvm::Function *F) {
1363 unsigned CallingConv;
1364 llvm::AttributeList PAL;
1365 ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv, false);
1366 F->setAttributes(PAL);
1367 F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1368 }
1369
removeImageAccessQualifier(std::string & TyName)1370 static void removeImageAccessQualifier(std::string& TyName) {
1371 std::string ReadOnlyQual("__read_only");
1372 std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
1373 if (ReadOnlyPos != std::string::npos)
1374 // "+ 1" for the space after access qualifier.
1375 TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
1376 else {
1377 std::string WriteOnlyQual("__write_only");
1378 std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
1379 if (WriteOnlyPos != std::string::npos)
1380 TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
1381 else {
1382 std::string ReadWriteQual("__read_write");
1383 std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
1384 if (ReadWritePos != std::string::npos)
1385 TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
1386 }
1387 }
1388 }
1389
// Returns the address space id that should be produced to the
// kernel_arg_addr_space metadata. This is always fixed to the ids
// as specified in the SPIR 2.0 specification in order to differentiate
// for example in clGetKernelArgInfo() implementation between the address
// spaces with targets without unique mapping to the OpenCL address spaces
// (basically all single AS CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global:
    return 1;
  case LangAS::opencl_constant:
    return 2;
  case LangAS::opencl_local:
    return 3;
  case LangAS::opencl_generic:
    return 4; // Not in SPIR 2.0 specs.
  case LangAS::opencl_global_device:
    return 5;
  case LangAS::opencl_global_host:
    return 6;
  default:
    // Anything else (including the default address space) is reported as
    // private.
    return 0; // Assume private.
  }
}
1414
// Attach the OpenCL kernel-argument info metadata nodes (kernel_arg_*) to
// Fn. With FD/CGF null, empty nodes are still attached so consumers always
// find the keys; otherwise one entry per kernel parameter is emitted.
void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
                                         const FunctionDecl *FD,
                                         CodeGenFunction *CGF) {
  assert(((FD && CGF) || (!FD && !CGF)) &&
         "Incorrect use - FD and CGF should either be both null or not!");
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as there are kernel arguments.

  const PrintingPolicy &Policy = Context.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  if (FD && CGF)
    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
      const ParmVarDecl *parm = FD->getParamDecl(i);
      QualType ty = parm->getType();
      std::string typeQuals;

      if (ty->isPointerType()) {
        QualType pointeeTy = ty->getPointeeType();

        // Get address qualifier.
        addressQuals.push_back(
            llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
                ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

        // Get argument type name.
        std::string typeName =
            pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

        // Turn "unsigned type" to "utype"
        std::string::size_type pos = typeName.find("unsigned");
        if (pointeeTy.isCanonical() && pos != std::string::npos)
          typeName.erase(pos + 1, 8);

        argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));

        std::string baseTypeName =
            pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
                Policy) +
            "*";

        // Turn "unsigned type" to "utype"
        pos = baseTypeName.find("unsigned");
        if (pos != std::string::npos)
          baseTypeName.erase(pos + 1, 8);

        argBaseTypeNames.push_back(
            llvm::MDString::get(VMContext, baseTypeName));

        // Get argument type qualifiers:
        if (ty.isRestrictQualified())
          typeQuals = "restrict";
        if (pointeeTy.isConstQualified() ||
            (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
          typeQuals += typeQuals.empty() ? "const" : " const";
        if (pointeeTy.isVolatileQualified())
          typeQuals += typeQuals.empty() ? "volatile" : " volatile";
      } else {
        // Non-pointer arguments: images and pipes are reported in the
        // global address space; everything else as private (0).
        uint32_t AddrSpc = 0;
        bool isPipe = ty->isPipeType();
        if (ty->isImageType() || isPipe)
          AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

        addressQuals.push_back(
            llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));

        // Get argument type name.
        std::string typeName;
        if (isPipe)
          typeName = ty.getCanonicalType()
                         ->castAs<PipeType>()
                         ->getElementType()
                         .getAsString(Policy);
        else
          typeName = ty.getUnqualifiedType().getAsString(Policy);

        // Turn "unsigned type" to "utype"
        std::string::size_type pos = typeName.find("unsigned");
        if (ty.isCanonical() && pos != std::string::npos)
          typeName.erase(pos + 1, 8);

        std::string baseTypeName;
        if (isPipe)
          baseTypeName = ty.getCanonicalType()
                             ->castAs<PipeType>()
                             ->getElementType()
                             .getCanonicalType()
                             .getAsString(Policy);
        else
          baseTypeName =
              ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

        // Remove access qualifiers on images
        // (as they are inseparable from type in clang implementation,
        // but OpenCL spec provides a special query to get access qualifier
        // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
        if (ty->isImageType()) {
          removeImageAccessQualifier(typeName);
          removeImageAccessQualifier(baseTypeName);
        }

        argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));

        // Turn "unsigned type" to "utype"
        pos = baseTypeName.find("unsigned");
        if (pos != std::string::npos)
          baseTypeName.erase(pos + 1, 8);

        argBaseTypeNames.push_back(
            llvm::MDString::get(VMContext, baseTypeName));

        if (isPipe)
          typeQuals = "pipe";
      }

      argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));

      // Get image and pipe access qualifier:
      if (ty->isImageType() || ty->isPipeType()) {
        const Decl *PDecl = parm;
        // If the parameter type is a typedef, the access attribute lives on
        // the typedef declaration rather than the parameter.
        if (auto *TD = dyn_cast<TypedefType>(ty))
          PDecl = TD->getDecl();
        const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
        if (A && A->isWriteOnly())
          accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
        else if (A && A->isReadWrite())
          accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
        else
          // read_only is the default access qualifier in OpenCL.
          accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
      } else
        accessQuals.push_back(llvm::MDString::get(VMContext, "none"));

      // Get argument name.
      argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
    }

  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(VMContext, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(VMContext, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(VMContext, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(VMContext, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(VMContext, argTypeQuals));
  // Argument names are only emitted when -cl-kernel-arg-info was given.
  if (getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(VMContext, argNames));
}
1583
1584 /// Determines whether the language options require us to model
1585 /// unwind exceptions. We treat -fexceptions as mandating this
1586 /// except under the fragile ObjC ABI with only ObjC exceptions
1587 /// enabled. This means, for example, that C with -fexceptions
1588 /// enables this.
hasUnwindExceptions(const LangOptions & LangOpts)1589 static bool hasUnwindExceptions(const LangOptions &LangOpts) {
1590 // If exceptions are completely disabled, obviously this is false.
1591 if (!LangOpts.Exceptions) return false;
1592
1593 // If C++ exceptions are enabled, this is true.
1594 if (LangOpts.CXXExceptions) return true;
1595
1596 // If ObjC exceptions are enabled, this depends on the ABI.
1597 if (LangOpts.ObjCExceptions) {
1598 return LangOpts.ObjCRuntime.hasUnwindExceptions();
1599 }
1600
1601 return true;
1602 }
1603
requiresMemberFunctionPointerTypeMetadata(CodeGenModule & CGM,const CXXMethodDecl * MD)1604 static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
1605 const CXXMethodDecl *MD) {
1606 // Check that the type metadata can ever actually be used by a call.
1607 if (!CGM.getCodeGenOpts().LTOUnit ||
1608 !CGM.HasHiddenLTOVisibility(MD->getParent()))
1609 return false;
1610
1611 // Only functions whose address can be taken with a member function pointer
1612 // need this sort of type metadata.
1613 return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
1614 !isa<CXXDestructorDecl>(MD);
1615 }
1616
1617 std::vector<const CXXRecordDecl *>
getMostBaseClasses(const CXXRecordDecl * RD)1618 CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
1619 llvm::SetVector<const CXXRecordDecl *> MostBases;
1620
1621 std::function<void (const CXXRecordDecl *)> CollectMostBases;
1622 CollectMostBases = [&](const CXXRecordDecl *RD) {
1623 if (RD->getNumBases() == 0)
1624 MostBases.insert(RD);
1625 for (const CXXBaseSpecifier &B : RD->bases())
1626 CollectMostBases(B.getType()->getAsCXXRecordDecl());
1627 };
1628 CollectMostBases(RD);
1629 return MostBases.takeVector();
1630 }
1631
/// Apply the definition-time LLVM attributes implied by decl \p D to \p F:
/// unwind tables, stack protection, the inlining policy (noinline /
/// alwaysinline / inlinehint / optnone interplay), cold/minsize, alignment,
/// and CFI !type metadata. \p D may be null for compiler-synthesized
/// functions; only a reduced set of attributes is applied in that case.
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
                                                           llvm::Function *F) {
  llvm::AttrBuilder B;

  if (CodeGenOpts.UnwindTables)
    B.addAttribute(llvm::Attribute::UWTable);

  // -fstack-clash-protection: probe large stack allocations via inline asm.
  if (CodeGenOpts.StackClashProtector)
    B.addAttribute("probe-stack", "inline-asm");

  if (!hasUnwindExceptions(LangOpts))
    B.addAttribute(llvm::Attribute::NoUnwind);

  // Stack protector: an explicit no_stack_protector attribute on the decl
  // wins over the global -fstack-protector mode.
  if (D && D->hasAttr<NoStackProtectorAttr>())
    B.addAttribute(llvm::Attribute::NoStackProtect);
  else if (LangOpts.getStackProtector() == LangOptions::SSPOn)
    B.addAttribute(llvm::Attribute::StackProtect);
  else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
    B.addAttribute(llvm::Attribute::StackProtectStrong);
  else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
    B.addAttribute(llvm::Attribute::StackProtectReq);

  if (!D) {
    // If we don't have a declaration to control inlining, the function isn't
    // explicitly marked as alwaysinline for semantic reasons, and inlining is
    // disabled, mark the function as noinline.
    if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
        CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
      B.addAttribute(llvm::Attribute::NoInline);

    F->addAttributes(llvm::AttributeList::FunctionIndex, B);
    return;
  }

  // Track whether we need to add the optnone LLVM attribute,
  // starting with the default for this optimization level.
  bool ShouldAddOptNone =
      !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
  // We can't add optnone in the following cases, it won't pass the verifier.
  ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
  ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();

  // Add optnone, but do so only if the function isn't always_inline.
  if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
      !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
    B.addAttribute(llvm::Attribute::OptimizeNone);

    // OptimizeNone implies noinline; we should not be inlining such functions.
    B.addAttribute(llvm::Attribute::NoInline);

    // We still need to handle naked functions even though optnone subsumes
    // much of their semantics.
    if (D->hasAttr<NakedAttr>())
      B.addAttribute(llvm::Attribute::Naked);

    // OptimizeNone wins over OptimizeForSize and MinSize.
    F->removeFnAttr(llvm::Attribute::OptimizeForSize);
    F->removeFnAttr(llvm::Attribute::MinSize);
  } else if (D->hasAttr<NakedAttr>()) {
    // Naked implies noinline: we should not be inlining such functions.
    B.addAttribute(llvm::Attribute::Naked);
    B.addAttribute(llvm::Attribute::NoInline);
  } else if (D->hasAttr<NoDuplicateAttr>()) {
    B.addAttribute(llvm::Attribute::NoDuplicate);
  } else if (D->hasAttr<NoInlineAttr>() && !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
    // Add noinline if the function isn't always_inline.
    B.addAttribute(llvm::Attribute::NoInline);
  } else if (D->hasAttr<AlwaysInlineAttr>() &&
             !F->hasFnAttribute(llvm::Attribute::NoInline)) {
    // (noinline wins over always_inline, and we can't specify both in IR)
    B.addAttribute(llvm::Attribute::AlwaysInline);
  } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
    // If we're not inlining, then force everything that isn't always_inline to
    // carry an explicit noinline attribute.
    if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
      B.addAttribute(llvm::Attribute::NoInline);
  } else {
    // Otherwise, propagate the inline hint attribute and potentially use its
    // absence to mark things as noinline.
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Search function and template pattern redeclarations for inline.
      auto CheckForInline = [](const FunctionDecl *FD) {
        auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
          return Redecl->isInlineSpecified();
        };
        if (any_of(FD->redecls(), CheckRedeclForInline))
          return true;
        // For instantiations, also look at the template pattern's redecls.
        const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
        if (!Pattern)
          return false;
        return any_of(Pattern->redecls(), CheckRedeclForInline);
      };
      if (CheckForInline(FD)) {
        B.addAttribute(llvm::Attribute::InlineHint);
      } else if (CodeGenOpts.getInlining() ==
                     CodeGenOptions::OnlyHintInlining &&
                 !FD->isInlined() &&
                 !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
        B.addAttribute(llvm::Attribute::NoInline);
      }
    }
  }

  // Add other optimization related attributes if we are optimizing this
  // function.
  if (!D->hasAttr<OptimizeNoneAttr>()) {
    if (D->hasAttr<ColdAttr>()) {
      if (!ShouldAddOptNone)
        B.addAttribute(llvm::Attribute::OptimizeForSize);
      B.addAttribute(llvm::Attribute::Cold);
    }

    if (D->hasAttr<MinSizeAttr>())
      B.addAttribute(llvm::Attribute::MinSize);
  }

  F->addAttributes(llvm::AttributeList::FunctionIndex, B);

  // Honor an explicit alignment attribute on the decl (in char units).
  unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
  if (alignment)
    F->setAlignment(llvm::Align(alignment));

  // -falign-functions=N applies only when the decl has no aligned attribute.
  if (!D->hasAttr<AlignedAttr>())
    if (LangOpts.FunctionAlignment)
      F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment));

  // Some C++ ABIs require 2-byte alignment for member functions, in order to
  // reserve a bit for differentiating between virtual and non-virtual member
  // functions. If the current target's C++ ABI requires this and this is a
  // member function, set its alignment accordingly.
  if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
    if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
      F->setAlignment(llvm::Align(2));
  }

  // In the cross-dso CFI mode with canonical jump tables, we want !type
  // attributes on definitions only.
  if (CodeGenOpts.SanitizeCfiCrossDso &&
      CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Skip available_externally functions. They won't be codegen'ed in the
      // current module anyway.
      if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
        CreateFunctionTypeMetadataForIcall(FD, F);
    }
  }

  // Emit type metadata on member functions for member function pointer checks.
  // These are only ever necessary on definitions; we're guaranteed that the
  // definition will be present in the LTO unit as a result of LTO visibility.
  auto *MD = dyn_cast<CXXMethodDecl>(D);
  if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
    for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
      llvm::Metadata *Id =
          CreateMetadataIdentifierForType(Context.getMemberPointerType(
              MD->getType(), Context.getRecordType(Base).getTypePtr()));
      F->addTypeMetadata(0, Id);
    }
  }
}
1792
setLLVMFunctionFEnvAttributes(const FunctionDecl * D,llvm::Function * F)1793 void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
1794 llvm::Function *F) {
1795 if (D->hasAttr<StrictFPAttr>()) {
1796 llvm::AttrBuilder FuncAttrs;
1797 FuncAttrs.addAttribute("strictfp");
1798 F->addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1799 }
1800 }
1801
SetCommonAttributes(GlobalDecl GD,llvm::GlobalValue * GV)1802 void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
1803 const Decl *D = GD.getDecl();
1804 if (dyn_cast_or_null<NamedDecl>(D))
1805 setGVProperties(GV, GD);
1806 else
1807 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
1808
1809 if (D && D->hasAttr<UsedAttr>())
1810 addUsedGlobal(GV);
1811
1812 if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
1813 const auto *VD = cast<VarDecl>(D);
1814 if (VD->getType().isConstQualified() &&
1815 VD->getStorageDuration() == SD_Static)
1816 addUsedGlobal(GV);
1817 }
1818 }
1819
/// Compute the "target-cpu", "tune-cpu" and "target-features" attributes for
/// the function named by \p GD and add them to \p Attrs, taking any target /
/// cpu_specific attributes on the decl into account.
/// \returns true if at least one attribute was added.
bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
                                                llvm::AttrBuilder &Attrs) {
  // Add target-cpu and target-features attributes to functions. If
  // we have a decl for the function and it has a target attribute then
  // parse that and add it to the feature set.
  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
  StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
  std::vector<std::string> Features;
  // Use the most recent redeclaration so later-added target attrs are seen.
  const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
  FD = FD ? FD->getMostRecentDecl() : FD;
  const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
  const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
  bool AddedAttr = false;
  if (TD || SD) {
    llvm::StringMap<bool> FeatureMap;
    getContext().getFunctionFeatureMap(FeatureMap, GD);

    // Produce the canonical string for this set of features ("+feat"/"-feat").
    for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
      Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());

    // Now add the target-cpu and target-features to the function.
    // While we populated the feature map above, we still need to
    // get and parse the target attribute so we can get the cpu for
    // the function.
    if (TD) {
      ParsedTargetAttr ParsedAttr = TD->parse();
      if (!ParsedAttr.Architecture.empty() &&
          getTarget().isValidCPUName(ParsedAttr.Architecture)) {
        TargetCPU = ParsedAttr.Architecture;
        TuneCPU = ""; // Clear the tune CPU.
      }
      if (!ParsedAttr.Tune.empty() &&
          getTarget().isValidCPUName(ParsedAttr.Tune))
        TuneCPU = ParsedAttr.Tune;
    }
  } else {
    // Otherwise just add the existing target cpu and target features to the
    // function.
    Features = getTarget().getTargetOpts().Features;
  }

  if (!TargetCPU.empty()) {
    Attrs.addAttribute("target-cpu", TargetCPU);
    AddedAttr = true;
  }
  if (!TuneCPU.empty()) {
    Attrs.addAttribute("tune-cpu", TuneCPU);
    AddedAttr = true;
  }
  if (!Features.empty()) {
    // Sort so the attribute string is canonical regardless of map order.
    llvm::sort(Features);
    Attrs.addAttribute("target-features", llvm::join(Features, ","));
    AddedAttr = true;
  }

  return AddedAttr;
}
1878
/// Set attributes that apply to real global objects (not aliases): section
/// pragmas, per-function CPU/feature attributes, explicit sections, and
/// target-specific attributes, on top of the common attributes.
void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
                                          llvm::GlobalObject *GO) {
  const Decl *D = GD.getDecl();
  SetCommonAttributes(GD, GO);

  if (D) {
    if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
      // Honor #pragma clang section for the matching variable kind.
      if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
        GV->addAttribute("bss-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
        GV->addAttribute("data-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
        GV->addAttribute("rodata-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
        GV->addAttribute("relro-section", SA->getName());
    }

    if (auto *F = dyn_cast<llvm::Function>(GO)) {
      // #pragma clang section text: only if no explicit section attribute.
      if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
        if (!D->getAttr<SectionAttr>())
          F->addFnAttr("implicit-section-name", SA->getName());

      llvm::AttrBuilder Attrs;
      if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
        // We know that GetCPUAndFeaturesAttributes will always have the
        // newest set, since it has the newest possible FunctionDecl, so the
        // new ones should replace the old.
        llvm::AttrBuilder RemoveAttrs;
        RemoveAttrs.addAttribute("target-cpu");
        RemoveAttrs.addAttribute("target-features");
        RemoveAttrs.addAttribute("tune-cpu");
        F->removeAttributes(llvm::AttributeList::FunctionIndex, RemoveAttrs);
        F->addAttributes(llvm::AttributeList::FunctionIndex, Attrs);
      }
    }

    // An explicit code_seg wins over a plain section attribute.
    if (const auto *CSA = D->getAttr<CodeSegAttr>())
      GO->setSection(CSA->getName());
    else if (const auto *SA = D->getAttr<SectionAttr>())
      GO->setSection(SA->getName());
  }

  // Finally let the target apply its own attributes (D may be null here).
  getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
}
1923
/// Set attributes and linkage for a function that is known to be internal to
/// this translation unit: the usual call-convention and definition-time
/// attribute sets, plus InternalLinkage and the non-alias attributes.
void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
                                                  llvm::Function *F,
                                                  const CGFunctionInfo &FI) {
  const Decl *D = GD.getDecl();
  SetLLVMFunctionAttributes(GD, FI, F);
  SetLLVMFunctionAttributesForDefinition(D, F);

  F->setLinkage(llvm::Function::InternalLinkage);

  setNonAliasAttributes(GD, F);
}
1935
setLinkageForGV(llvm::GlobalValue * GV,const NamedDecl * ND)1936 static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
1937 // Set linkage and visibility in case we never see a definition.
1938 LinkageInfo LV = ND->getLinkageAndVisibility();
1939 // Don't set internal linkage on declarations.
1940 // "extern_weak" is overloaded in LLVM; we probably should have
1941 // separate linkage types for this.
1942 if (isExternallyVisible(LV.getLinkage()) &&
1943 (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
1944 GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
1945 }
1946
CreateFunctionTypeMetadataForIcall(const FunctionDecl * FD,llvm::Function * F)1947 void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
1948 llvm::Function *F) {
1949 // Only if we are checking indirect calls.
1950 if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
1951 return;
1952
1953 // Non-static class methods are handled via vtable or member function pointer
1954 // checks elsewhere.
1955 if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
1956 return;
1957
1958 llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
1959 F->addTypeMetadata(0, MD);
1960 F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
1961
1962 // Emit a hash-based bit set entry for cross-DSO calls.
1963 if (CodeGenOpts.SanitizeCfiCrossDso)
1964 if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
1965 F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
1966 }
1967
/// Set the declaration-time attributes on \p F for the function decl \p GD:
/// intrinsic attributes, calling-convention attributes, linkage/visibility,
/// sections, unnamed_addr, CFI type metadata, OpenMP declare simd, and
/// callback metadata. Definition-only attributes are applied elsewhere.
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
                                          bool IsIncompleteFunction,
                                          bool IsThunk) {

  if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
    // If this is an intrinsic function, set the function's attributes
    // to the intrinsic's attributes.
    F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
    return;
  }

  const auto *FD = cast<FunctionDecl>(GD.getDecl());

  if (!IsIncompleteFunction)
    SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);

  // Add the Returned attribute for "this", except for iOS 5 and earlier
  // where substantial code, including the libstdc++ dylib, was compiled with
  // GCC and does not actually return "this".
  if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
      !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
    assert(!F->arg_empty() &&
           F->arg_begin()->getType()
             ->canLosslesslyBitCastTo(F->getReturnType()) &&
           "unexpected this return");
    F->addAttribute(1, llvm::Attribute::Returned);
  }

  // Only a few attributes are set on declarations; these may later be
  // overridden by a definition.

  setLinkageForGV(F, FD);
  setGVProperties(F, FD);

  // Setup target-specific attributes.
  if (!IsIncompleteFunction && F->isDeclaration())
    getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);

  // code_seg wins over a plain section attribute.
  if (const auto *CSA = FD->getAttr<CodeSegAttr>())
    F->setSection(CSA->getName());
  else if (const auto *SA = FD->getAttr<SectionAttr>())
    F->setSection(SA->getName());

  // If we plan on emitting this inline builtin, we can't treat it as a builtin.
  if (FD->isInlineBuiltinDeclaration()) {
    const FunctionDecl *FDBody;
    bool HasBody = FD->hasBody(FDBody);
    (void)HasBody;
    assert(HasBody && "Inline builtin declarations should always have an "
                      "available body!");
    if (shouldEmitFunction(FDBody))
      F->addAttribute(llvm::AttributeList::FunctionIndex,
                      llvm::Attribute::NoBuiltin);
  }

  if (FD->isReplaceableGlobalAllocationFunction()) {
    // A replaceable global allocation function does not act like a builtin by
    // default, only if it is invoked by a new-expression or delete-expression.
    F->addAttribute(llvm::AttributeList::FunctionIndex,
                    llvm::Attribute::NoBuiltin);
  }

  // Ctors/dtors and virtual methods are never compared by address, so their
  // addresses may be merged (unnamed_addr).
  if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
    F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isVirtual())
      F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Don't emit entries for function declarations in the cross-DSO mode. This
  // is handled with better precision by the receiving DSO. But if jump tables
  // are non-canonical then we need type metadata in order to produce the local
  // jump table.
  if (!CodeGenOpts.SanitizeCfiCrossDso ||
      !CodeGenOpts.SanitizeCfiCanonicalJumpTables)
    CreateFunctionTypeMetadataForIcall(FD, F);

  if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
    getOpenMPRuntime().emitDeclareSimdFunction(FD, F);

  if (const auto *CB = FD->getAttr<CallbackAttr>()) {
    // Annotate the callback behavior as metadata:
    //   - The callback callee (as argument number).
    //   - The callback payloads (as argument numbers).
    llvm::LLVMContext &Ctx = F->getContext();
    llvm::MDBuilder MDB(Ctx);

    // The payload indices are all but the first one in the encoding. The first
    // identifies the callback callee.
    int CalleeIdx = *CB->encoding_begin();
    ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
    F->addMetadata(llvm::LLVMContext::MD_callback,
                   *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                               CalleeIdx, PayloadIndices,
                                               /* VarArgsArePassed */ false)}));
  }
}
2064
/// Add \p GV to the llvm.used list, preventing the optimizer and the linker
/// from discarding it even if it appears unreferenced.
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
  // Functions may legitimately still be declarations here; other globals
  // must already have a definition.
  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
         "Only globals with definition can force usage.");
  LLVMUsed.emplace_back(GV);
}
2070
/// Add \p GV to the llvm.compiler.used list, preventing only the optimizer
/// (not the linker) from discarding it.
void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
  // Unlike addUsedGlobal, declarations are never allowed here.
  assert(!GV->isDeclaration() &&
         "Only globals with definition can force usage.");
  LLVMCompilerUsed.emplace_back(GV);
}
2076
emitUsed(CodeGenModule & CGM,StringRef Name,std::vector<llvm::WeakTrackingVH> & List)2077 static void emitUsed(CodeGenModule &CGM, StringRef Name,
2078 std::vector<llvm::WeakTrackingVH> &List) {
2079 // Don't create llvm.used if there is no need.
2080 if (List.empty())
2081 return;
2082
2083 // Convert List to what ConstantArray needs.
2084 SmallVector<llvm::Constant*, 8> UsedArray;
2085 UsedArray.resize(List.size());
2086 for (unsigned i = 0, e = List.size(); i != e; ++i) {
2087 UsedArray[i] =
2088 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2089 cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
2090 }
2091
2092 if (UsedArray.empty())
2093 return;
2094 llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
2095
2096 auto *GV = new llvm::GlobalVariable(
2097 CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
2098 llvm::ConstantArray::get(ATy, UsedArray), Name);
2099
2100 GV->setSection("llvm.metadata");
2101 }
2102
/// Flush the accumulated used-global lists into the llvm.used and
/// llvm.compiler.used arrays in the module.
void CodeGenModule::emitLLVMUsed() {
  emitUsed(*this, "llvm.used", LLVMUsed);
  emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
}
2107
/// Record a raw linker option string to be emitted later under the
/// !llvm.linker.options named metadata.
void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
2112
AddDetectMismatch(StringRef Name,StringRef Value)2113 void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
2114 llvm::SmallString<32> Opt;
2115 getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
2116 if (Opt.empty())
2117 return;
2118 auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2119 LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2120 }
2121
AddDependentLib(StringRef Lib)2122 void CodeGenModule::AddDependentLib(StringRef Lib) {
2123 auto &C = getLLVMContext();
2124 if (getTarget().getTriple().isOSBinFormatELF()) {
2125 ELFDependentLibraries.push_back(
2126 llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
2127 return;
2128 }
2129
2130 llvm::SmallString<24> Opt;
2131 getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
2132 auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2133 LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
2134 }
2135
2136 /// Add link options implied by the given module, including modules
2137 /// it depends on, using a postorder walk.
/// Add link options implied by \p Mod, including modules it depends on,
/// using a postorder walk: parents and imports first, then this module's
/// own link libraries. \p Visited prevents revisiting shared dependencies.
static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
                                    SmallVectorImpl<llvm::MDNode *> &Metadata,
                                    llvm::SmallPtrSet<Module *, 16> &Visited) {
  // Import this module's parent.
  if (Mod->Parent && Visited.insert(Mod->Parent).second) {
    addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
  }

  // Import this module's dependencies (walked in reverse index order).
  for (unsigned I = Mod->Imports.size(); I > 0; --I) {
    if (Visited.insert(Mod->Imports[I - 1]).second)
      addLinkOptionsPostorder(CGM, Mod->Imports[I-1], Metadata, Visited);
  }

  // Add linker options to link against the libraries/frameworks
  // described by this module.
  llvm::LLVMContext &Context = CGM.getLLVMContext();
  bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();

  // For modules that use export_as for linking, use that module
  // name instead. Note: dependencies above have already been handled,
  // so only this module's own libraries are skipped.
  if (Mod->UseExportAsModuleLinkName)
    return;

  for (unsigned I = Mod->LinkLibraries.size(); I > 0; --I) {
    // Link against a framework.  Frameworks are currently Darwin only, so we
    // don't to ask TargetCodeGenInfo for the spelling of the linker option.
    if (Mod->LinkLibraries[I-1].IsFramework) {
      llvm::Metadata *Args[2] = {
          llvm::MDString::get(Context, "-framework"),
          llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library)};

      Metadata.push_back(llvm::MDNode::get(Context, Args));
      continue;
    }

    // Link against a library.
    if (IsELF) {
      // ELF uses a "lib" + name pair rather than a target-spelled option.
      llvm::Metadata *Args[2] = {
          llvm::MDString::get(Context, "lib"),
          llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library),
      };
      Metadata.push_back(llvm::MDNode::get(Context, Args));
    } else {
      llvm::SmallString<24> Opt;
      CGM.getTargetCodeGenInfo().getDependentLibraryOption(
          Mod->LinkLibraries[I - 1].Library, Opt);
      auto *OptString = llvm::MDString::get(Context, Opt);
      Metadata.push_back(llvm::MDNode::get(Context, OptString));
    }
  }
}
2190
/// Emit the !llvm.linker.options metadata implied by imported Clang modules:
/// find leaf (non-explicit) modules reachable from the imports, then append
/// their link options in reverse topological order.
void CodeGenModule::EmitModuleLinkOptions() {
  // Collect the set of all of the modules we want to visit to emit link
  // options, which is essentially the imported modules and all of their
  // non-explicit child modules.
  llvm::SetVector<clang::Module *> LinkModules;
  llvm::SmallPtrSet<clang::Module *, 16> Visited;
  SmallVector<clang::Module *, 16> Stack;

  // Seed the stack with imported modules.
  for (Module *M : ImportedModules) {
    // Do not add any link flags when an implementation TU of a module imports
    // a header of that same module.
    if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
        !getLangOpts().isCompilingModule())
      continue;
    if (Visited.insert(M).second)
      Stack.push_back(M);
  }

  // Find all of the modules to import, making a little effort to prune
  // non-leaf modules.
  while (!Stack.empty()) {
    clang::Module *Mod = Stack.pop_back_val();

    bool AnyChildren = false;

    // Visit the submodules of this module.
    for (const auto &SM : Mod->submodules()) {
      // Skip explicit children; they need to be explicitly imported to be
      // linked against.
      if (SM->IsExplicit)
        continue;

      if (Visited.insert(SM).second) {
        Stack.push_back(SM);
        AnyChildren = true;
      }
    }

    // We didn't find any children, so add this module to the list of
    // modules to link against.
    if (!AnyChildren) {
      LinkModules.insert(Mod);
    }
  }

  // Add link options for all of the imported modules in reverse topological
  // order.  We don't do anything to try to order import link flags with respect
  // to linker options inserted by things like #pragma comment().
  SmallVector<llvm::MDNode *, 16> MetadataArgs;
  Visited.clear();
  for (Module *M : LinkModules)
    if (Visited.insert(M).second)
      addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
  std::reverse(MetadataArgs.begin(), MetadataArgs.end());
  LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());

  // Add the linker options metadata flag.
  auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options");
  for (auto *MD : LinkerOptionsMetadata)
    NMD->addOperand(MD);
}
2253
/// Emit all deferred declarations (and vtables) that have become referenced.
/// Recurses until no new deferred work is produced, so related definitions
/// end up emitted together in DFS order.
void CodeGenModule::EmitDeferred() {
  // Emit deferred declare target declarations.
  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
    getOpenMPRuntime().emitDeferredTargetDecls();

  // Emit code for any potentially referenced deferred decls.  Since a
  // previously unused static decl may become used during the generation of code
  // for a static function, iterate until no changes are made.

  if (!DeferredVTables.empty()) {
    EmitDeferredVTables();

    // Emitting a vtable doesn't directly cause more vtables to
    // become deferred, although it can cause functions to be
    // emitted that then need those vtables.
    assert(DeferredVTables.empty());
  }

  // Emit CUDA/HIP static device variables referenced by host code only.
  if (getLangOpts().CUDA)
    for (auto V : getContext().CUDAStaticDeviceVarReferencedByHost)
      DeferredDeclsToEmit.push_back(V);

  // Stop if we're out of both deferred vtables and deferred declarations.
  if (DeferredDeclsToEmit.empty())
    return;

  // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
  // work, it will not interfere with this.
  std::vector<GlobalDecl> CurDeclsToEmit;
  CurDeclsToEmit.swap(DeferredDeclsToEmit);

  for (GlobalDecl &D : CurDeclsToEmit) {
    // We should call GetAddrOfGlobal with IsForDefinition set to true in order
    // to get GlobalValue with exactly the type we need, not something that
    // might had been created for another decl with the same mangled name but
    // different type.
    llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
        GetAddrOfGlobal(D, ForDefinition));

    // In case of different address spaces, we may still get a cast, even with
    // IsForDefinition equal to true. Query mangled names table to get
    // GlobalValue.
    if (!GV)
      GV = GetGlobalValue(getMangledName(D));

    // Make sure GetGlobalValue returned non-null.
    assert(GV);

    // Check to see if we've already emitted this.  This is necessary
    // for a couple of reasons: first, decls can end up in the
    // deferred-decls queue multiple times, and second, decls can end
    // up with definitions in unusual ways (e.g. by an extern inline
    // function acquiring a strong function redefinition).  Just
    // ignore these cases.
    if (!GV->isDeclaration())
      continue;

    // If this is OpenMP, check if it is legal to emit this global normally.
    if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(D))
      continue;

    // Otherwise, emit the definition and move on to the next one.
    EmitGlobalDefinition(D, GV);

    // If we found out that we need to emit more decls, do that recursively.
    // This has the advantage that the decls are emitted in a DFS and related
    // ones are close together, which is convenient for testing.
    if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
      EmitDeferred();
      assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
    }
  }
}
2328
/// Opportunistically emit external vtables as available_externally when the
/// C++ ABI says it is safe to speculate; clears the queue afterwards.
void CodeGenModule::EmitVTablesOpportunistically() {
  // Try to emit external vtables as available_externally if they have emitted
  // all inlined virtual functions.  It runs after EmitDeferred() and therefore
  // is not allowed to create new references to things that need to be emitted
  // lazily. Note that it also uses fact that we eagerly emitting RTTI.

  assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
         && "Only emit opportunistic vtables with optimizations");

  for (const CXXRecordDecl *RD : OpportunisticVTables) {
    assert(getVTables().isVTableExternal(RD) &&
           "This queue should only contain external vtables");
    if (getCXXABI().canSpeculativelyEmitVTable(RD))
      VTables.GenerateClassData(RD);
  }
  OpportunisticVTables.clear();
}
2346
EmitGlobalAnnotations()2347 void CodeGenModule::EmitGlobalAnnotations() {
2348 if (Annotations.empty())
2349 return;
2350
2351 // Create a new global variable for the ConstantStruct in the Module.
2352 llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
2353 Annotations[0]->getType(), Annotations.size()), Annotations);
2354 auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
2355 llvm::GlobalValue::AppendingLinkage,
2356 Array, "llvm.global.annotations");
2357 gv->setSection(AnnotationSection);
2358 }
2359
EmitAnnotationString(StringRef Str)2360 llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
2361 llvm::Constant *&AStr = AnnotationStrings[Str];
2362 if (AStr)
2363 return AStr;
2364
2365 // Not found yet, create a new global.
2366 llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
2367 auto *gv =
2368 new llvm::GlobalVariable(getModule(), s->getType(), true,
2369 llvm::GlobalValue::PrivateLinkage, s, ".str");
2370 gv->setSection(AnnotationSection);
2371 gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2372 AStr = gv;
2373 return gv;
2374 }
2375
EmitAnnotationUnit(SourceLocation Loc)2376 llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
2377 SourceManager &SM = getContext().getSourceManager();
2378 PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2379 if (PLoc.isValid())
2380 return EmitAnnotationString(PLoc.getFilename());
2381 return EmitAnnotationString(SM.getBufferName(Loc));
2382 }
2383
EmitAnnotationLineNo(SourceLocation L)2384 llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
2385 SourceManager &SM = getContext().getSourceManager();
2386 PresumedLoc PLoc = SM.getPresumedLoc(L);
2387 unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
2388 SM.getExpansionLineNumber(L);
2389 return llvm::ConstantInt::get(Int32Ty, LineNo);
2390 }
2391
llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
  // Emit the (constant-evaluated) extra arguments of an annotate attribute as
  // an anonymous-struct global, returned as an i8*. With no arguments the
  // annotation entry's args slot is a null i8*.
  ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
  if (Exprs.empty())
    return llvm::ConstantPointerNull::get(Int8PtrTy);

  // Cache on a hash of the evaluated APValues so identical argument lists
  // share a single global.
  llvm::FoldingSetNodeID ID;
  for (Expr *E : Exprs) {
    ID.Add(cast<clang::ConstantExpr>(E)->getAPValueResult());
  }
  llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()];
  if (Lookup)
    return Lookup;

  // Emit each argument as an abstract constant and pack them into a private,
  // unnamed_addr struct global in the annotation section.
  llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
  LLVMArgs.reserve(Exprs.size());
  ConstantEmitter ConstEmiter(*this);
  llvm::transform(Exprs, std::back_inserter(LLVMArgs), [&](const Expr *E) {
    const auto *CE = cast<clang::ConstantExpr>(E);
    return ConstEmiter.emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(),
                                    CE->getType());
  });
  auto *Struct = llvm::ConstantStruct::getAnon(LLVMArgs);
  auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true,
                                      llvm::GlobalValue::PrivateLinkage, Struct,
                                      ".args");
  GV->setSection(AnnotationSection);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  // Annotation entries store the args pointer as i8*.
  auto *Bitcasted = llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);

  Lookup = Bitcasted;
  return Bitcasted;
}
2424
llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
                                                const AnnotateAttr *AA,
                                                SourceLocation L) {
  // Build one llvm.global.annotations entry for the given annotate attribute
  // on global value GV, located at L.
  // Get the globals for file name, annotation, and the line number.
  llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
                 *UnitGV = EmitAnnotationUnit(L),
                 *LineNoCst = EmitAnnotationLineNo(L),
                 *Args = EmitAnnotationArgs(AA);

  // Annotation entries reference the annotated value through an address
  // space 0 pointer; addrspacecast first if GV lives elsewhere.
  llvm::Constant *ASZeroGV = GV;
  if (GV->getAddressSpace() != 0) {
    ASZeroGV = llvm::ConstantExpr::getAddrSpaceCast(
        GV, GV->getValueType()->getPointerTo(0));
  }

  // Create the ConstantStruct for the global annotation:
  // { annotated value, annotation string, source file, line number, args }.
  llvm::Constant *Fields[] = {
      llvm::ConstantExpr::getBitCast(ASZeroGV, Int8PtrTy),
      llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
      llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
      LineNoCst,
      Args,
  };
  return llvm::ConstantStruct::getAnon(Fields);
}
2450
AddGlobalAnnotations(const ValueDecl * D,llvm::GlobalValue * GV)2451 void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
2452 llvm::GlobalValue *GV) {
2453 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2454 // Get the struct elements for these annotations.
2455 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2456 Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
2457 }
2458
isInSanitizerBlacklist(SanitizerMask Kind,llvm::Function * Fn,SourceLocation Loc) const2459 bool CodeGenModule::isInSanitizerBlacklist(SanitizerMask Kind,
2460 llvm::Function *Fn,
2461 SourceLocation Loc) const {
2462 const auto &SanitizerBL = getContext().getSanitizerBlacklist();
2463 // Blacklist by function name.
2464 if (SanitizerBL.isBlacklistedFunction(Kind, Fn->getName()))
2465 return true;
2466 // Blacklist by location.
2467 if (Loc.isValid())
2468 return SanitizerBL.isBlacklistedLocation(Kind, Loc);
2469 // If location is unknown, this may be a compiler-generated function. Assume
2470 // it's located in the main file.
2471 auto &SM = Context.getSourceManager();
2472 if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
2473 return SanitizerBL.isBlacklistedFile(Kind, MainFile->getName());
2474 }
2475 return false;
2476 }
2477
bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
                                           SourceLocation Loc, QualType Ty,
                                           StringRef Category) const {
  // Decide whether sanitizer instrumentation of global GV should be
  // suppressed, by name, by location, or by its (record) type.
  // For now globals can be blacklisted only in ASan and KASan.
  const SanitizerMask EnabledAsanMask =
      LangOpts.Sanitize.Mask &
      (SanitizerKind::Address | SanitizerKind::KernelAddress |
       SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
       SanitizerKind::MemTag);
  if (!EnabledAsanMask)
    return false;
  const auto &SanitizerBL = getContext().getSanitizerBlacklist();
  if (SanitizerBL.isBlacklistedGlobal(EnabledAsanMask, GV->getName(), Category))
    return true;
  if (SanitizerBL.isBlacklistedLocation(EnabledAsanMask, Loc, Category))
    return true;
  // Check global type.
  if (!Ty.isNull()) {
    // Drill down the array types: if global variable of a fixed type is
    // blacklisted, we also don't instrument arrays of them.
    while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
      Ty = AT->getElementType();
    // Compare against the canonical, unqualified type name.
    Ty = Ty.getCanonicalType().getUnqualifiedType();
    // We allow to blacklist only record types (classes, structs etc.)
    if (Ty->isRecordType()) {
      std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
      if (SanitizerBL.isBlacklistedType(EnabledAsanMask, TypeStr, Category))
        return true;
    }
  }
  return false;
}
2510
imbueXRayAttrs(llvm::Function * Fn,SourceLocation Loc,StringRef Category) const2511 bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
2512 StringRef Category) const {
2513 const auto &XRayFilter = getContext().getXRayFilter();
2514 using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
2515 auto Attr = ImbueAttr::NONE;
2516 if (Loc.isValid())
2517 Attr = XRayFilter.shouldImbueLocation(Loc, Category);
2518 if (Attr == ImbueAttr::NONE)
2519 Attr = XRayFilter.shouldImbueFunction(Fn->getName());
2520 switch (Attr) {
2521 case ImbueAttr::NONE:
2522 return false;
2523 case ImbueAttr::ALWAYS:
2524 Fn->addFnAttr("function-instrument", "xray-always");
2525 break;
2526 case ImbueAttr::ALWAYS_ARG1:
2527 Fn->addFnAttr("function-instrument", "xray-always");
2528 Fn->addFnAttr("xray-log-args", "1");
2529 break;
2530 case ImbueAttr::NEVER:
2531 Fn->addFnAttr("function-instrument", "xray-never");
2532 break;
2533 }
2534 return true;
2535 }
2536
MustBeEmitted(const ValueDecl * Global)2537 bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
2538 // Never defer when EmitAllDecls is specified.
2539 if (LangOpts.EmitAllDecls)
2540 return true;
2541
2542 if (CodeGenOpts.KeepStaticConsts) {
2543 const auto *VD = dyn_cast<VarDecl>(Global);
2544 if (VD && VD->getType().isConstQualified() &&
2545 VD->getStorageDuration() == SD_Static)
2546 return true;
2547 }
2548
2549 return getContext().DeclMustBeEmitted(Global);
2550 }
2551
bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
  // Returns false when emission of Global must be delayed because its
  // linkage or emission requirements may still change later in the TU.
  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      // Implicit template instantiations may change linkage if they are later
      // explicitly instantiated, so they should not be emitted eagerly.
      return false;
    // In OpenMP 5.0 function may be marked as device_type(nohost) and we should
    // not emit them eagerly unless we sure that the function must be emitted on
    // the host.
    if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd &&
        !LangOpts.OpenMPIsDevice &&
        !OMPDeclareTargetDeclAttr::getDeviceType(FD) &&
        !FD->isUsed(/*CheckUsedAttr=*/false) && !FD->isReferenced())
      return false;
  }
  if (const auto *VD = dyn_cast<VarDecl>(Global))
    if (Context.getInlineVariableDefinitionKind(VD) ==
        ASTContext::InlineVariableDefinitionKind::WeakUnknown)
      // A definition of an inline constexpr static data member may change
      // linkage later if it's redeclared outside the class.
      return false;
  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
  // codegen for global variables, because they may be marked as threadprivate.
  if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
      getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
      !isTypeConstant(Global->getType(), false) &&
      !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
    return false;

  return true;
}
2583
ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
  // Return the address of the global variable holding the MSVC __uuidof
  // descriptor for GD, creating it on first use.
  StringRef Name = getMangledName(GD);

  // The UUID descriptor should be pointer aligned.
  CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);

  // Look for an existing global.
  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
    return ConstantAddress(GV, Alignment);

  ConstantEmitter Emitter(*this);
  llvm::Constant *Init;

  APValue &V = GD->getAsAPValue();
  if (!V.isAbsent()) {
    // If possible, emit the APValue version of the initializer. In particular,
    // this gets the type of the constant right.
    Init = Emitter.emitForInitializer(
        GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
  } else {
    // As a fallback, directly construct the constant.
    // FIXME: This may get padding wrong under esoteric struct layout rules.
    // MSVC appears to create a complete type 'struct __s_GUID' that it
    // presumably uses to represent these constants.
    MSGuidDecl::Parts Parts = GD->getParts();
    llvm::Constant *Fields[4] = {
        llvm::ConstantInt::get(Int32Ty, Parts.Part1),
        llvm::ConstantInt::get(Int16Ty, Parts.Part2),
        llvm::ConstantInt::get(Int16Ty, Parts.Part3),
        llvm::ConstantDataArray::getRaw(
            StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
            Int8Ty)};
    Init = llvm::ConstantStruct::getAnon(Fields);
  }

  auto *GV = new llvm::GlobalVariable(
      getModule(), Init->getType(),
      /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
  // linkonce_odr across TUs; deduplicate via COMDAT where the target has it.
  if (supportsCOMDAT())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
  setDSOLocal(GV);

  llvm::Constant *Addr = GV;
  if (!V.isAbsent()) {
    // The APValue path may have created placeholder references that the
    // emitter must now resolve against the finished global.
    Emitter.finalize(GV);
  } else {
    // The fallback struct may not match the declared type exactly; cast the
    // address to the expected pointer type.
    llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
    Addr = llvm::ConstantExpr::getBitCast(
        GV, Ty->getPointerTo(GV->getAddressSpace()));
  }
  return ConstantAddress(Addr, Alignment);
}
2636
ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
    const TemplateParamObjectDecl *TPO) {
  // Return the address of the global holding a C++20 non-type template
  // parameter object, creating a linkonce_odr constant on first use.
  StringRef Name = getMangledName(TPO);
  CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());

  // Reuse an already-emitted global of the same mangled name.
  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
    return ConstantAddress(GV, Alignment);

  ConstantEmitter Emitter(*this);
  llvm::Constant *Init = Emitter.emitForInitializer(
      TPO->getValue(), TPO->getType().getAddressSpace(), TPO->getType());

  if (!Init) {
    ErrorUnsupported(TPO, "template parameter object");
    return ConstantAddress::invalid();
  }

  auto *GV = new llvm::GlobalVariable(
      getModule(), Init->getType(),
      /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
  // Deduplicate identical objects from other TUs via COMDAT where supported.
  if (supportsCOMDAT())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
  Emitter.finalize(GV);

  return ConstantAddress(GV, Alignment);
}
2663
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
  // Return the address of the aliasee of a weakref declaration, creating an
  // extern_weak declaration for it if nothing by that name exists yet.
  const AliasAttr *AA = VD->getAttr<AliasAttr>();
  assert(AA && "No alias?");

  CharUnits Alignment = getContext().getDeclAlign(VD);
  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());

  // See if there is already something with the target's name in the module.
  llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
  if (Entry) {
    // Cast the existing entry to the type the weakref declaration expects.
    unsigned AS = getContext().getTargetAddressSpace(VD->getType());
    auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
    return ConstantAddress(Ptr, Alignment);
  }

  // Nothing exists yet: create a declaration of the aliasee with the right
  // type (function vs. variable).
  llvm::Constant *Aliasee;
  if (isa<llvm::FunctionType>(DeclTy))
    Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
                                      GlobalDecl(cast<FunctionDecl>(VD)),
                                      /*ForVTable=*/false);
  else
    Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
                                    llvm::PointerType::getUnqual(DeclTy),
                                    nullptr);

  // weakref targets may legitimately be undefined at link time, so give the
  // declaration extern_weak linkage and record it so later definitions can
  // upgrade it.
  auto *F = cast<llvm::GlobalValue>(Aliasee);
  F->setLinkage(llvm::Function::ExternalWeakLinkage);
  WeakRefReferences.insert(F);

  return ConstantAddress(Aliasee, Alignment);
}
2695
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
  // Top-level dispatch for emitting a global declaration: handles aliases,
  // ifuncs, CUDA/OpenMP filtering, forward declarations, and decides between
  // eager and deferred emission. The order of these checks is significant.
  const auto *Global = cast<ValueDecl>(GD.getDecl());

  // Weak references don't produce any output by themselves.
  if (Global->hasAttr<WeakRefAttr>())
    return;

  // If this is an alias definition (which otherwise looks like a declaration)
  // emit it now.
  if (Global->hasAttr<AliasAttr>())
    return EmitAliasDefinition(GD);

  // IFunc like an alias whose value is resolved at runtime by calling resolver.
  if (Global->hasAttr<IFuncAttr>())
    return emitIFuncDefinition(GD);

  // If this is a cpu_dispatch multiversion function, emit the resolver.
  if (Global->hasAttr<CPUDispatchAttr>())
    return emitCPUDispatchDefinition(GD);

  // If this is CUDA, be selective about which declarations we emit.
  if (LangOpts.CUDA) {
    if (LangOpts.CUDAIsDevice) {
      // On the device side, only emit things explicitly marked for the
      // device (or device builtin surface/texture types).
      if (!Global->hasAttr<CUDADeviceAttr>() &&
          !Global->hasAttr<CUDAGlobalAttr>() &&
          !Global->hasAttr<CUDAConstantAttr>() &&
          !Global->hasAttr<CUDASharedAttr>() &&
          !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
          !Global->getType()->isCUDADeviceBuiltinTextureType())
        return;
    } else {
      // We need to emit host-side 'shadows' for all global
      // device-side variables because the CUDA runtime needs their
      // size and host-side address in order to provide access to
      // their device-side incarnations.

      // So device-only functions are the only things we skip.
      if (isa<FunctionDecl>(Global) && !Global->hasAttr<CUDAHostAttr>() &&
          Global->hasAttr<CUDADeviceAttr>())
        return;

      assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
             "Expected Variable or Function");
    }
  }

  if (LangOpts.OpenMP) {
    // If this is OpenMP, check if it is legal to emit this global normally.
    if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
      return;
    if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Global)) {
      if (MustBeEmitted(Global))
        EmitOMPDeclareReduction(DRD);
      return;
    } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
      if (MustBeEmitted(Global))
        EmitOMPDeclareMapper(DMD);
      return;
    }
  }

  // Ignore declarations, they will be emitted on their first use.
  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
    // Forward declarations are emitted lazily on first use.
    if (!FD->doesThisDeclarationHaveABody()) {
      if (!FD->doesDeclarationForceExternallyVisibleDefinition())
        return;

      StringRef MangledName = getMangledName(GD);

      // Compute the function info and LLVM type.
      const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
      llvm::Type *Ty = getTypes().GetFunctionType(FI);

      GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
                              /*DontDefer=*/false);
      return;
    }
  } else {
    const auto *VD = cast<VarDecl>(Global);
    assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
    if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
        !Context.isMSStaticDataMemberInlineDefinition(VD)) {
      if (LangOpts.OpenMP) {
        // Emit declaration of the must-be-emitted declare target variable.
        if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
                OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
          bool UnifiedMemoryEnabled =
              getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
          if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
              !UnifiedMemoryEnabled) {
            (void)GetAddrOfGlobalVar(VD);
          } else {
            assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
                    (*Res == OMPDeclareTargetDeclAttr::MT_To &&
                     UnifiedMemoryEnabled)) &&
                   "Link clause or to clause with unified memory expected.");
            (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
          }

          return;
        }
      }
      // If this declaration may have caused an inline variable definition to
      // change linkage, make sure that it's emitted.
      if (Context.getInlineVariableDefinitionKind(VD) ==
          ASTContext::InlineVariableDefinitionKind::Strong)
        GetAddrOfGlobalVar(VD);
      return;
    }
  }

  // Defer code generation to first use when possible, e.g. if this is an inline
  // function. If the global must always be emitted, do it eagerly if possible
  // to benefit from cache locality.
  if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
    // Emit the definition if it can't be deferred.
    EmitGlobalDefinition(GD);
    return;
  }

  // If we're deferring emission of a C++ variable with an
  // initializer, remember the order in which it appeared in the file.
  if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
      cast<VarDecl>(Global)->hasInit()) {
    DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
    CXXGlobalInits.push_back(nullptr);
  }

  StringRef MangledName = getMangledName(GD);
  if (GetGlobalValue(MangledName) != nullptr) {
    // The value has already been used and should therefore be emitted.
    addDeferredDeclToEmit(GD);
  } else if (MustBeEmitted(Global)) {
    // The value must be emitted, but cannot be emitted eagerly.
    assert(!MayBeEmittedEagerly(Global));
    addDeferredDeclToEmit(GD);
  } else {
    // Otherwise, remember that we saw a deferred decl with this name. The
    // first use of the mangled name will cause it to move into
    // DeferredDeclsToEmit.
    DeferredDecls[MangledName] = GD;
  }
}
2840
2841 // Check if T is a class type with a destructor that's not dllimport.
HasNonDllImportDtor(QualType T)2842 static bool HasNonDllImportDtor(QualType T) {
2843 if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
2844 if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
2845 if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
2846 return true;
2847
2848 return false;
2849 }
2850
namespace {
// Statement visitor that detects whether a function body contains a direct
// call back to the function's own emitted name, either via a matching asm
// label or via a library builtin whose __builtin_-stripped name matches.
struct FunctionIsDirectlyRecursive
    : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
  // The name the enclosing function will be emitted under.
  const StringRef Name;
  const Builtin::Context &BI;
  FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
      : Name(N), BI(C) {}

  // Returns true when the call resolves (after asm-label or builtin-name
  // normalization) to Name itself.
  bool VisitCallExpr(const CallExpr *E) {
    const FunctionDecl *FD = E->getDirectCallee();
    if (!FD)
      return false;
    AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
    if (Attr && Name == Attr->getLabel())
      return true;
    unsigned BuiltinID = FD->getBuiltinID();
    if (!BuiltinID || !BI.isLibFunction(BuiltinID))
      return false;
    // A call to __builtin_foo lowers to foo, so it recurses into a function
    // named foo.
    StringRef BuiltinName = BI.getName(BuiltinID);
    if (BuiltinName.startswith("__builtin_") &&
        Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
      return true;
    }
    return false;
  }

  // Recurse through all child statements; true as soon as any child matches.
  bool VisitStmt(const Stmt *S) {
    for (const Stmt *Child : S->children())
      if (Child && this->Visit(Child))
        return true;
    return false;
  }
};

// Make sure we're not referencing non-imported vars or functions.
// Each Visit* returns SafeToInline; returning false aborts the traversal
// (RecursiveASTVisitor stops when a visitor returns false).
struct DLLImportFunctionVisitor
    : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
  bool SafeToInline = true;

  // Also walk compiler-generated code (implicit ctors/dtors etc.).
  bool shouldVisitImplicitCode() const { return true; }

  bool VisitVarDecl(VarDecl *VD) {
    if (VD->getTLSKind()) {
      // A thread-local variable cannot be imported.
      SafeToInline = false;
      return SafeToInline;
    }

    // A variable definition might imply a destructor call.
    if (VD->isThisDeclarationADefinition())
      SafeToInline = !HasNonDllImportDtor(VD->getType());

    return SafeToInline;
  }

  bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
    // The temporary's destructor must itself be importable.
    if (const auto *D = E->getTemporary()->getDestructor())
      SafeToInline = D->hasAttr<DLLImportAttr>();
    return SafeToInline;
  }

  bool VisitDeclRefExpr(DeclRefExpr *E) {
    // Referenced functions and globals must be dllimport; locals are fine.
    ValueDecl *VD = E->getDecl();
    if (isa<FunctionDecl>(VD))
      SafeToInline = VD->hasAttr<DLLImportAttr>();
    else if (VarDecl *V = dyn_cast<VarDecl>(VD))
      SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
    return SafeToInline;
  }

  bool VisitCXXConstructExpr(CXXConstructExpr *E) {
    SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
    return SafeToInline;
  }

  bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
    CXXMethodDecl *M = E->getMethodDecl();
    if (!M) {
      // Call through a pointer to member function. This is safe to inline.
      SafeToInline = true;
    } else {
      SafeToInline = M->hasAttr<DLLImportAttr>();
    }
    return SafeToInline;
  }

  bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
    SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
    return SafeToInline;
  }

  bool VisitCXXNewExpr(CXXNewExpr *E) {
    SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
    return SafeToInline;
  }
};
}
2948
2949 // isTriviallyRecursive - Check if this function calls another
2950 // decl that, because of the asm attribute or the other decl being a builtin,
2951 // ends up pointing to itself.
2952 bool
isTriviallyRecursive(const FunctionDecl * FD)2953 CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
2954 StringRef Name;
2955 if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
2956 // asm labels are a special kind of mangling we have to support.
2957 AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
2958 if (!Attr)
2959 return false;
2960 Name = Attr->getLabel();
2961 } else {
2962 Name = FD->getName();
2963 }
2964
2965 FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
2966 const Stmt *Body = FD->getBody();
2967 return Body ? Walker.Visit(Body) : false;
2968 }
2969
bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
  // Decide whether an available_externally function is worth emitting:
  // non-available_externally functions are always emitted; otherwise only
  // when optimizing (or always_inline), when any dllimport parts are safe to
  // inline, and when the body is not trivially self-recursive.
  if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
    return true;
  const auto *F = cast<FunctionDecl>(GD.getDecl());
  // At -O0 the body would never be inlined, so skip it unless always_inline.
  if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
    return false;

  if (F->hasAttr<DLLImportAttr>()) {
    // Check whether it would be safe to inline this dllimport function.
    DLLImportFunctionVisitor Visitor;
    Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
    if (!Visitor.SafeToInline)
      return false;

    if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(F)) {
      // Implicit destructor invocations aren't captured in the AST, so the
      // check above can't see them. Check for them manually here.
      for (const Decl *Member : Dtor->getParent()->decls())
        if (isa<FieldDecl>(Member))
          if (HasNonDllImportDtor(cast<FieldDecl>(Member)->getType()))
            return false;
      for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
        if (HasNonDllImportDtor(B.getType()))
          return false;
    }
  }

  // PR9614. Avoid cases where the source code is lying to us. An available
  // externally function should have an equivalent function somewhere else,
  // but a function that calls itself through asm label/`__builtin_` trickery is
  // clearly not equivalent to the real implementation.
  // This happens in glibc's btowc and in some configure checks.
  return !isTriviallyRecursive(F);
}
3004
shouldOpportunisticallyEmitVTables()3005 bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
3006 return CodeGenOpts.OptimizationLevel > 0;
3007 }
3008
EmitMultiVersionFunctionDefinition(GlobalDecl GD,llvm::GlobalValue * GV)3009 void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
3010 llvm::GlobalValue *GV) {
3011 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3012
3013 if (FD->isCPUSpecificMultiVersion()) {
3014 auto *Spec = FD->getAttr<CPUSpecificAttr>();
3015 for (unsigned I = 0; I < Spec->cpus_size(); ++I)
3016 EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3017 // Requires multiple emits.
3018 } else
3019 EmitGlobalFunctionDefinition(GD, GV);
3020 }
3021
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
  // Emit the definition of a function or variable, routing through the
  // C++ ABI for structors and the multiversion path where applicable.
  const auto *D = cast<ValueDecl>(GD.getDecl());

  PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
                                 Context.getSourceManager(),
                                 "Generating code for declaration");

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // At -O0, don't generate IR for functions with available_externally
    // linkage.
    if (!shouldEmitFunction(GD))
      return;

    llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
      std::string Name;
      llvm::raw_string_ostream OS(Name);
      FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
                               /*Qualified=*/true);
      return Name;
    });

    if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
      // Make sure to emit the definition(s) before we emit the thunks.
      // This is necessary for the generation of certain thunks.
      if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
        ABI->emitCXXStructor(GD);
      else if (FD->isMultiVersion())
        EmitMultiVersionFunctionDefinition(GD, GV);
      else
        EmitGlobalFunctionDefinition(GD, GV);

      if (Method->isVirtual())
        getVTables().EmitThunks(GD);

      return;
    }

    if (FD->isMultiVersion())
      return EmitMultiVersionFunctionDefinition(GD, GV);
    return EmitGlobalFunctionDefinition(GD, GV);
  }

  if (const auto *VD = dyn_cast<VarDecl>(D))
    // A tentative definition (no initializer anywhere) is flagged as such.
    return EmitGlobalVarDefinition(VD, !VD->hasDefinition());

  llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
}
3069
3070 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
3071 llvm::Function *NewFn);
3072
3073 static unsigned
TargetMVPriority(const TargetInfo & TI,const CodeGenFunction::MultiVersionResolverOption & RO)3074 TargetMVPriority(const TargetInfo &TI,
3075 const CodeGenFunction::MultiVersionResolverOption &RO) {
3076 unsigned Priority = 0;
3077 for (StringRef Feat : RO.Conditions.Features)
3078 Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
3079
3080 if (!RO.Conditions.Architecture.empty())
3081 Priority = std::max(
3082 Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
3083 return Priority;
3084 }
3085
void CodeGenModule::emitMultiVersionFunctions() {
  // For each queued target-multiversion function, collect every version's
  // llvm::Function (emitting definitions as needed) and fill in the body of
  // its resolver, with options sorted by descending priority.
  for (GlobalDecl GD : MultiVersionFuncs) {
    SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
    const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
    getContext().forEachMultiversionedFunctionVersion(
        FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
          GlobalDecl CurGD{
              (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
          StringRef MangledName = getMangledName(CurGD);
          llvm::Constant *Func = GetGlobalValue(MangledName);
          if (!Func) {
            if (CurFD->isDefined()) {
              // Emit the version's definition now so the resolver can
              // reference it.
              EmitGlobalFunctionDefinition(CurGD, nullptr);
              Func = GetGlobalValue(MangledName);
            } else {
              // Declaration only: create a declaration for the resolver to
              // point at.
              const CGFunctionInfo &FI =
                  getTypes().arrangeGlobalDeclaration(GD);
              llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
              Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
                                       /*DontDefer=*/false, ForDefinition);
            }
            assert(Func && "This should have just been created");
          }

          const auto *TA = CurFD->getAttr<TargetAttr>();
          llvm::SmallVector<StringRef, 8> Feats;
          TA->getAddedFeatures(Feats);

          Options.emplace_back(cast<llvm::Function>(Func),
                               TA->getArchitecture(), Feats);
        });

    llvm::Function *ResolverFunc;
    const TargetInfo &TI = getTarget();

    // With ifunc support the resolver lives under <name>.resolver; otherwise
    // the resolver is emitted under the function's own mangled name.
    if (TI.supportsIFunc() || FD->isTargetMultiVersion()) {
      ResolverFunc = cast<llvm::Function>(
          GetGlobalValue((getMangledName(GD) + ".resolver").str()));
      ResolverFunc->setLinkage(llvm::Function::WeakODRLinkage);
    } else {
      ResolverFunc = cast<llvm::Function>(GetGlobalValue(getMangledName(GD)));
    }

    if (supportsCOMDAT())
      ResolverFunc->setComdat(
          getModule().getOrInsertComdat(ResolverFunc->getName()));

    // Highest-priority options must be checked first by the resolver.
    llvm::stable_sort(
        Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
                       const CodeGenFunction::MultiVersionResolverOption &RHS) {
          return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
        });
    CodeGenFunction CGF(*this);
    CGF.EmitMultiVersionResolver(ResolverFunc, Options);
  }
}
3142
/// Emit the dispatching machinery for a function declared with the
/// cpu_dispatch attribute: a weak_odr resolver (an ifunc resolver when the
/// target supports ifuncs, otherwise a forwarding function), plus, in ifunc
/// mode, a weak_odr alias under the unmangled name.
void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
  const auto *FD = cast<FunctionDecl>(GD.getDecl());
  assert(FD && "Not a FunctionDecl?");
  const auto *DD = FD->getAttr<CPUDispatchAttr>();
  assert(DD && "Not a cpu_dispatch Function?");
  llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());

  // Member functions must use the method arrangement so the IR type reflects
  // the implicit 'this' parameter and any ABI adjustments.
  if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
    const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
    DeclTy = getTypes().GetFunctionType(FInfo);
  }

  StringRef ResolverName = getMangledName(GD);

  // In ifunc mode the resolver returns a pointer to the selected variant;
  // without ifunc support the resolver *is* the dispatched function and so
  // shares its type (and its GlobalDecl, for attribute purposes).
  llvm::Type *ResolverType;
  GlobalDecl ResolverGD;
  if (getTarget().supportsIFunc())
    ResolverType = llvm::FunctionType::get(
        llvm::PointerType::get(DeclTy,
                               Context.getTargetAddressSpace(FD->getType())),
        false);
  else {
    ResolverType = DeclTy;
    ResolverGD = GD;
  }

  auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
      ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
  ResolverFunc->setLinkage(llvm::Function::WeakODRLinkage);
  if (supportsCOMDAT())
    ResolverFunc->setComdat(
        getModule().getOrInsertComdat(ResolverFunc->getName()));

  SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
  const TargetInfo &Target = getTarget();
  unsigned Index = 0;
  for (const IdentifierInfo *II : DD->cpus()) {
    // Get the name of the target function so we can look it up/create it.
    std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
                              getCPUSpecificMangling(*this, II->getName());

    llvm::Constant *Func = GetGlobalValue(MangledName);

    if (!Func) {
      GlobalDecl ExistingDecl = Manglings.lookup(MangledName);
      if (ExistingDecl.getDecl() &&
          ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
        // The variant has a body but was not emitted yet; emit it now so the
        // resolver can reference the real definition.
        EmitGlobalFunctionDefinition(ExistingDecl, nullptr);
        Func = GetGlobalValue(MangledName);
      } else {
        // Otherwise just create a declaration for this CPU variant.
        if (!ExistingDecl.getDecl())
          ExistingDecl = GD.getWithMultiVersionIndex(Index);

        Func = GetOrCreateLLVMFunction(
            MangledName, DeclTy, ExistingDecl,
            /*ForVTable=*/false, /*DontDefer=*/true,
            /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
      }
    }

    // Collect this CPU's feature list, dropping the first character of each
    // feature string and any feature the target cannot test via
    // __builtin_cpu_supports.
    llvm::SmallVector<StringRef, 32> Features;
    Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
    llvm::transform(Features, Features.begin(),
                    [](StringRef Str) { return Str.substr(1); });
    Features.erase(std::remove_if(
        Features.begin(), Features.end(), [&Target](StringRef Feat) {
          return !Target.validateCpuSupports(Feat);
        }), Features.end());
    Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
    ++Index;
  }

  // Sort so the variant with the largest cpu_supports mask (most demanding
  // feature set) is tested first by the resolver.
  llvm::sort(
      Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
                  const CodeGenFunction::MultiVersionResolverOption &RHS) {
        return CodeGenFunction::GetX86CpuSupportsMask(LHS.Conditions.Features) >
               CodeGenFunction::GetX86CpuSupportsMask(RHS.Conditions.Features);
      });

  // If the list contains multiple 'default' versions, such as when it contains
  // 'pentium' and 'generic', don't emit the call to the generic one (since we
  // always run on at least a 'pentium'). We do this by deleting the 'least
  // advanced' (read, lowest mangling letter).
  while (Options.size() > 1 &&
         CodeGenFunction::GetX86CpuSupportsMask(
             (Options.end() - 2)->Conditions.Features) == 0) {
    StringRef LHSName = (Options.end() - 2)->Function->getName();
    StringRef RHSName = (Options.end() - 1)->Function->getName();
    if (LHSName.compare(RHSName) < 0)
      Options.erase(Options.end() - 2);
    else
      Options.erase(Options.end() - 1);
  }

  CodeGenFunction CGF(*this);
  CGF.EmitMultiVersionResolver(ResolverFunc, Options);

  // In ifunc mode, also emit a weak_odr alias under the plain (non-variant)
  // name that refers to the ifunc, so ordinary calls dispatch through it.
  if (getTarget().supportsIFunc()) {
    std::string AliasName = getMangledNameImpl(
        *this, GD, FD, /*OmitMultiVersionMangling=*/true);
    llvm::Constant *AliasFunc = GetGlobalValue(AliasName);
    if (!AliasFunc) {
      auto *IFunc = cast<llvm::GlobalIFunc>(GetOrCreateLLVMFunction(
          AliasName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/true,
          /*IsThunk=*/false, llvm::AttributeList(), NotForDefinition));
      auto *GA = llvm::GlobalAlias::create(
          DeclTy, 0, getFunctionLinkage(GD), AliasName, IFunc, &getModule());
      GA->setLinkage(llvm::Function::WeakODRLinkage);
      SetCommonAttributes(GD, GA);
    }
  }
}
3255
/// If a dispatcher for the specified mangled name is not in the module, create
/// and return an llvm Function with the specified type.
///
/// In ifunc mode the returned value is the ifunc ("<name>.ifunc"), which is
/// backed by a separate "<name>.resolver" function; otherwise it is the
/// resolver function itself.
llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
    GlobalDecl GD, llvm::Type *DeclTy, const FunctionDecl *FD) {
  std::string MangledName =
      getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);

  // Holds the name of the resolver, in ifunc mode this is the ifunc (which has
  // a separate resolver).
  std::string ResolverName = MangledName;
  if (getTarget().supportsIFunc())
    ResolverName += ".ifunc";
  else if (FD->isTargetMultiVersion())
    ResolverName += ".resolver";

  // If this already exists, just return that one.
  if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
    return ResolverGV;

  // Since this is the first time we've created this IFunc, make sure
  // that we put this multiversioned function into the list to be
  // replaced later if necessary (target multiversioning only).
  if (!FD->isCPUDispatchMultiVersion() && !FD->isCPUSpecificMultiVersion())
    MultiVersionFuncs.push_back(GD);

  if (getTarget().supportsIFunc()) {
    // The resolver returns a pointer (in the function's address space) to the
    // selected variant; the ifunc of the declared type points at the resolver.
    llvm::Type *ResolverType = llvm::FunctionType::get(
        llvm::PointerType::get(
            DeclTy, getContext().getTargetAddressSpace(FD->getType())),
        false);
    llvm::Constant *Resolver = GetOrCreateLLVMFunction(
        MangledName + ".resolver", ResolverType, GlobalDecl{},
        /*ForVTable=*/false);
    llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
        DeclTy, 0, llvm::Function::WeakODRLinkage, "", Resolver, &getModule());
    GIF->setName(ResolverName);
    SetCommonAttributes(FD, GIF);

    return GIF;
  }

  // Without ifunc support the resolver is an ordinary function that has the
  // dispatched function's own type.
  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
      ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
  assert(isa<llvm::GlobalValue>(Resolver) &&
         "Resolver should be created for the first time");
  SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
  return Resolver;
}
3304
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
/// module, create and return an llvm Function with the specified type. If there
/// is something in the module with the specified name, return it potentially
/// bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the function when it is first created.
llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
    StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable,
    bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
    ForDefinition_t IsForDefinition) {
  const Decl *D = GD.getDecl();

  // Any attempts to use a MultiVersion function should result in retrieving
  // the iFunc instead. Name Mangling will handle the rest of the changes.
  if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
    // For the device mark the function as one that should be emitted.
    if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
        !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
        !DontDefer && !IsForDefinition) {
      // Emit the definition now, resolving ctor/dtor variants so the correct
      // entry point is generated.
      if (const FunctionDecl *FDDef = FD->getDefinition()) {
        GlobalDecl GDDef;
        if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
          GDDef = GlobalDecl(CD, GD.getCtorType());
        else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
          GDDef = GlobalDecl(DD, GD.getDtorType());
        else
          GDDef = GlobalDecl(FDDef);
        EmitGlobal(GDDef);
      }
    }

    if (FD->isMultiVersion()) {
      if (FD->hasAttr<TargetAttr>())
        UpdateMultiVersionNames(GD, FD);
      if (!IsForDefinition)
        return GetOrCreateMultiVersionResolver(GD, Ty, FD);
    }
  }

  // Lookup the entry, lazily creating it if necessary.
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry) {
    // A previous weakref reference has become a real use; restore external
    // linkage unless the decl itself is marked weak.
    if (WeakRefReferences.erase(Entry)) {
      const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
      if (FD && !FD->hasAttr<WeakAttr>())
        Entry->setLinkage(llvm::Function::ExternalLinkage);
    }

    // Handle dropped DLL attributes.
    if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>()) {
      Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
      setDSOLocal(Entry);
    }

    // If there are two attempts to define the same mangled name, issue an
    // error.
    if (IsForDefinition && !Entry->isDeclaration()) {
      GlobalDecl OtherGD;
      // Check that GD is not yet in DiagnosedConflictingDefinitions is required
      // to make sure that we issue an error only once.
      if (lookupRepresentativeDecl(MangledName, OtherGD) &&
          (GD.getCanonicalDecl().getDecl() !=
           OtherGD.getCanonicalDecl().getDecl()) &&
          DiagnosedConflictingDefinitions.insert(GD).second) {
        getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
            << MangledName;
        getDiags().Report(OtherGD.getDecl()->getLocation(),
                          diag::note_previous_definition);
      }
    }

    // Exact type match on a function or alias: return it directly.
    if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
        (Entry->getValueType() == Ty)) {
      return Entry;
    }

    // Make sure the result is of the correct type.
    // (If function is requested for a definition, we always need to create a new
    // function, not just return a bitcast.)
    if (!IsForDefinition)
      return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
  }

  // This function doesn't have a complete type (for example, the return
  // type is an incomplete struct). Use a fake type instead, and make
  // sure not to try to set attributes.
  bool IsIncompleteFunction = false;

  llvm::FunctionType *FTy;
  if (isa<llvm::FunctionType>(Ty)) {
    FTy = cast<llvm::FunctionType>(Ty);
  } else {
    FTy = llvm::FunctionType::get(VoidTy, false);
    IsIncompleteFunction = true;
  }

  llvm::Function *F =
      llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
                             Entry ? StringRef() : MangledName, &getModule());

  // If we already created a function with the same mangled name (but different
  // type) before, take its name and add it to the list of functions to be
  // replaced with F at the end of CodeGen.
  //
  // This happens if there is a prototype for a function (e.g. "int f()") and
  // then a definition of a different type (e.g. "int f(int x)").
  if (Entry) {
    F->takeName(Entry);

    // This might be an implementation of a function without a prototype, in
    // which case, try to do special replacement of calls which match the new
    // prototype. The really key thing here is that we also potentially drop
    // arguments from the call site so as to make a direct call, which makes the
    // inliner happier and suppresses a number of optimizer warnings (!) about
    // dropping arguments.
    if (!Entry->use_empty()) {
      ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
      Entry->removeDeadConstantUsers();
    }

    llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
        F, Entry->getValueType()->getPointerTo());
    addGlobalValReplacement(Entry, BC);
  }

  assert(F->getName() == MangledName && "name was uniqued!");
  if (D)
    SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
  if (ExtraAttrs.hasAttributes(llvm::AttributeList::FunctionIndex)) {
    llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeList::FunctionIndex);
    F->addAttributes(llvm::AttributeList::FunctionIndex, B);
  }

  if (!DontDefer) {
    // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
    // each other bottoming out with the base dtor. Therefore we emit non-base
    // dtors on usage, even if there is no dtor definition in the TU.
    if (D && isa<CXXDestructorDecl>(D) &&
        getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
                                           GD.getDtorType()))
      addDeferredDeclToEmit(GD);

    // This is the first use or definition of a mangled name. If there is a
    // deferred decl with this name, remember that we need to emit it at the end
    // of the file.
    auto DDI = DeferredDecls.find(MangledName);
    if (DDI != DeferredDecls.end()) {
      // Move the potentially referenced deferred decl to the
      // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
      // don't need it anymore).
      addDeferredDeclToEmit(DDI->second);
      DeferredDecls.erase(DDI);

      // Otherwise, there are cases we have to worry about where we're
      // using a declaration for which we must emit a definition but where
      // we might not find a top-level definition:
      //   - member functions defined inline in their classes
      //   - friend functions defined inline in some class
      //   - special member functions with implicit definitions
      // If we ever change our AST traversal to walk into class methods,
      // this will be unnecessary.
      //
      // We also don't emit a definition for a function if it's going to be an
      // entry in a vtable, unless it's already marked as used.
    } else if (getLangOpts().CPlusPlus && D) {
      // Look for a declaration that's lexically in a record.
      for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
           FD = FD->getPreviousDecl()) {
        if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
          if (FD->doesThisDeclarationHaveABody()) {
            addDeferredDeclToEmit(GD.getWithDecl(FD));
            break;
          }
        }
      }
    }
  }

  // Make sure the result is of the requested type.
  if (!IsIncompleteFunction) {
    assert(F->getFunctionType() == Ty);
    return F;
  }

  llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
  return llvm::ConstantExpr::getBitCast(F, PTy);
}
3493
3494 /// GetAddrOfFunction - Return the address of the given function. If Ty is
3495 /// non-null, then this function will use the specified type if it has to
3496 /// create it (this occurs when we see a definition of the function).
GetAddrOfFunction(GlobalDecl GD,llvm::Type * Ty,bool ForVTable,bool DontDefer,ForDefinition_t IsForDefinition)3497 llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
3498 llvm::Type *Ty,
3499 bool ForVTable,
3500 bool DontDefer,
3501 ForDefinition_t IsForDefinition) {
3502 assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
3503 "consteval function should never be emitted");
3504 // If there was no specific requested type, just convert it now.
3505 if (!Ty) {
3506 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3507 Ty = getTypes().ConvertType(FD->getType());
3508 }
3509
3510 // Devirtualized destructor calls may come through here instead of via
3511 // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
3512 // of the complete destructor when necessary.
3513 if (const auto *DD = dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
3514 if (getTarget().getCXXABI().isMicrosoft() &&
3515 GD.getDtorType() == Dtor_Complete &&
3516 DD->getParent()->getNumVBases() == 0)
3517 GD = GlobalDecl(DD, Dtor_Base);
3518 }
3519
3520 StringRef MangledName = getMangledName(GD);
3521 return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
3522 /*IsThunk=*/false, llvm::AttributeList(),
3523 IsForDefinition);
3524 }
3525
/// Find the FunctionDecl, if any, that corresponds to the named runtime
/// function: first at translation-unit scope, then (for C++) inside the
/// __cxxabiv1 and std namespaces.
static const FunctionDecl *
GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
  TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);

  // Try a direct lookup at translation-unit scope first.
  IdentifierInfo &CII = C.Idents.get(Name);
  for (const auto &Result : DC->lookup(&CII))
    if (const auto FD = dyn_cast<FunctionDecl>(Result))
      return FD;

  if (!C.getLangOpts().CPlusPlus)
    return nullptr;

  // Demangle the premangled name from getTerminateFn()
  IdentifierInfo &CXXII =
      (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
          ? C.Idents.get("terminate")
          : C.Idents.get(Name);

  // Look through the __cxxabiv1 and std namespaces; a namespace may be
  // wrapped in a linkage-spec, in which case look it up inside that.
  for (const auto &N : {"__cxxabiv1", "std"}) {
    IdentifierInfo &NS = C.Idents.get(N);
    for (const auto &Result : DC->lookup(&NS)) {
      NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
      if (auto LSD = dyn_cast<LinkageSpecDecl>(Result))
        // Rebind ND to the namespace found inside the linkage spec.
        for (const auto &Result : LSD->lookup(&NS))
          if ((ND = dyn_cast<NamespaceDecl>(Result)))
            break;

      if (ND)
        for (const auto &Result : ND->lookup(&CXXII))
          if (const auto *FD = dyn_cast<FunctionDecl>(Result))
            return FD;
    }
  }

  return nullptr;
}
3563
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
///
/// \param ExtraAttrs additional attributes to apply to the function.
/// \param Local if true, never mark the function dllimport.
/// \param AssumeConvergent if true, add the 'convergent' attribute.
llvm::FunctionCallee
CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
                                     llvm::AttributeList ExtraAttrs, bool Local,
                                     bool AssumeConvergent) {
  if (AssumeConvergent) {
    ExtraAttrs =
        ExtraAttrs.addAttribute(VMContext, llvm::AttributeList::FunctionIndex,
                                llvm::Attribute::Convergent);
  }

  llvm::Constant *C =
      GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
                              /*DontDefer=*/false, /*IsThunk=*/false,
                              ExtraAttrs);

  // Only touch linkage-related properties when we actually created a fresh,
  // bodiless function (an existing definition keeps its settings).
  if (auto *F = dyn_cast<llvm::Function>(C)) {
    if (F->empty()) {
      F->setCallingConv(getRuntimeCC());

      // In Windows Itanium environments, try to mark runtime functions
      // dllimport. For Mingw and MSVC, don't. We don't really know if the user
      // will link their standard library statically or dynamically. Marking
      // functions imported when they are not imported can cause linker errors
      // and warnings.
      if (!Local && getTriple().isWindowsItaniumEnvironment() &&
          !getCodeGenOpts().LTOVisibilityPublicStd) {
        const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
        if (!FD || FD->hasAttr<DLLImportAttr>()) {
          F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
          F->setLinkage(llvm::GlobalValue::ExternalLinkage);
        }
      }
      setDSOLocal(F);
    }
  }

  return {FTy, C};
}
3604
3605 /// isTypeConstant - Determine whether an object of this type can be emitted
3606 /// as a constant.
3607 ///
3608 /// If ExcludeCtor is true, the duration when the object's constructor runs
3609 /// will not be considered. The caller will need to verify that the object is
3610 /// not written to during its construction.
isTypeConstant(QualType Ty,bool ExcludeCtor)3611 bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
3612 if (!Ty.isConstant(Context) && !Ty->isReferenceType())
3613 return false;
3614
3615 if (Context.getLangOpts().CPlusPlus) {
3616 if (const CXXRecordDecl *Record
3617 = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
3618 return ExcludeCtor && !Record->hasMutableFields() &&
3619 Record->hasTrivialDestructor();
3620 }
3621
3622 return true;
3623 }
3624
/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
/// create and return an llvm GlobalVariable with the specified type. If there
/// is something in the module with the specified name, return it potentially
/// bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the global when it is first created.
///
/// If IsForDefinition is true, it is guaranteed that an actual global with
/// type Ty will be returned, not conversion of a variable with the same
/// mangled name but some other type.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
                                     llvm::PointerType *Ty,
                                     const VarDecl *D,
                                     ForDefinition_t IsForDefinition) {
  // Lookup the entry, lazily creating it if necessary.
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry) {
    // A previous weakref reference has become a real use; restore external
    // linkage unless the decl itself is marked weak.
    if (WeakRefReferences.erase(Entry)) {
      if (D && !D->hasAttr<WeakAttr>())
        Entry->setLinkage(llvm::Function::ExternalLinkage);
    }

    // Handle dropped DLL attributes.
    if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
      Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);

    if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
      getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);

    // Exact type match: return the existing global as-is.
    if (Entry->getType() == Ty)
      return Entry;

    // If there are two attempts to define the same mangled name, issue an
    // error.
    if (IsForDefinition && !Entry->isDeclaration()) {
      GlobalDecl OtherGD;
      const VarDecl *OtherD;

      // Check that D is not yet in DiagnosedConflictingDefinitions is required
      // to make sure that we issue an error only once.
      if (D && lookupRepresentativeDecl(MangledName, OtherGD) &&
          (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
          (OtherD = dyn_cast<VarDecl>(OtherGD.getDecl())) &&
          OtherD->hasInit() &&
          DiagnosedConflictingDefinitions.insert(D).second) {
        getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
            << MangledName;
        getDiags().Report(OtherGD.getDecl()->getLocation(),
                          diag::note_previous_definition);
      }
    }

    // Make sure the result is of the correct type.
    if (Entry->getType()->getAddressSpace() != Ty->getAddressSpace())
      return llvm::ConstantExpr::getAddrSpaceCast(Entry, Ty);

    // (If global is requested for a definition, we always need to create a new
    // global, not just return a bitcast.)
    if (!IsForDefinition)
      return llvm::ConstantExpr::getBitCast(Entry, Ty);
  }

  auto AddrSpace = GetGlobalVarAddressSpace(D);
  auto TargetAddrSpace = getContext().getTargetAddressSpace(AddrSpace);

  auto *GV = new llvm::GlobalVariable(
      getModule(), Ty->getElementType(), false,
      llvm::GlobalValue::ExternalLinkage, nullptr, MangledName, nullptr,
      llvm::GlobalVariable::NotThreadLocal, TargetAddrSpace);

  // If we already created a global with the same mangled name (but different
  // type) before, take its name and remove it from its parent.
  if (Entry) {
    GV->takeName(Entry);

    // Redirect all existing uses of the old global to the new one (via a
    // bitcast to the old pointer type) before deleting it.
    if (!Entry->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(GV, Entry->getType());
      Entry->replaceAllUsesWith(NewPtrForOldDecl);
    }

    Entry->eraseFromParent();
  }

  // This is the first use or definition of a mangled name. If there is a
  // deferred decl with this name, remember that we need to emit it at the end
  // of the file.
  auto DDI = DeferredDecls.find(MangledName);
  if (DDI != DeferredDecls.end()) {
    // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
    // list, and remove it from DeferredDecls (since we don't need it anymore).
    addDeferredDeclToEmit(DDI->second);
    DeferredDecls.erase(DDI);
  }

  // Handle things which are present even on external declarations.
  if (D) {
    if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
      getOpenMPRuntime().registerTargetGlobalVariable(D, GV);

    // FIXME: This code is overly simple and should be merged with other global
    // handling.
    GV->setConstant(isTypeConstant(D->getType(), false));

    GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());

    setLinkageForGV(GV, D);

    if (D->getTLSKind()) {
      if (D->getTLSKind() == VarDecl::TLS_Dynamic)
        CXXThreadLocals.push_back(D);
      setTLSMode(GV, *D);
    }

    setGVProperties(GV, D);

    // If required by the ABI, treat declarations of static data members with
    // inline initializers as definitions.
    if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
      EmitGlobalVarDefinition(D);
    }

    // Emit section information for extern variables.
    if (D->hasExternalStorage()) {
      if (const SectionAttr *SA = D->getAttr<SectionAttr>())
        GV->setSection(SA->getName());
    }

    // Handle XCore specific ABI requirements.
    if (getTriple().getArch() == llvm::Triple::xcore &&
        D->getLanguageLinkage() == CLanguageLinkage &&
        D->getType().isConstant(Context) &&
        isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
      GV->setSection(".cp.rodata");

    // Check if we a have a const declaration with an initializer, we may be
    // able to emit it as available_externally to expose it's value to the
    // optimizer.
    if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() &&
        D->getType().isConstQualified() && !GV->hasInitializer() &&
        !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
      const auto *Record =
          Context.getBaseElementType(D->getType())->getAsCXXRecordDecl();
      bool HasMutableFields = Record && Record->hasMutableFields();
      if (!HasMutableFields) {
        const VarDecl *InitDecl;
        const Expr *InitExpr = D->getAnyInitializer(InitDecl);
        if (InitExpr) {
          ConstantEmitter emitter(*this);
          llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
          if (Init) {
            auto *InitType = Init->getType();
            if (GV->getValueType() != InitType) {
              // The type of the initializer does not match the definition.
              // This happens when an initializer has a different type from
              // the type of the global (because of padding at the end of a
              // structure for instance).
              GV->setName(StringRef());
              // Make a new global with the correct type, this is now guaranteed
              // to work.
              auto *NewGV = cast<llvm::GlobalVariable>(
                  GetAddrOfGlobalVar(D, InitType, IsForDefinition)
                      ->stripPointerCasts());

              // Erase the old global, since it is no longer used.
              GV->eraseFromParent();
              GV = NewGV;
            } else {
              GV->setInitializer(Init);
              GV->setConstant(true);
              GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
            }
            emitter.finalize(GV);
          }
        }
      }
    }
  }

  if (GV->isDeclaration())
    getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);

  // If the global was created in a different language address space than the
  // one the caller expects, hand back an address-space-cast view of it.
  LangAS ExpectedAS =
      D ? D->getType().getAddressSpace()
        : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
  assert(getContext().getTargetAddressSpace(ExpectedAS) ==
         Ty->getPointerAddressSpace());
  if (AddrSpace != ExpectedAS)
    return getTargetCodeGenInfo().performAddrSpaceCast(*this, GV, AddrSpace,
                                                       ExpectedAS, Ty);

  return GV;
}
3820
3821 llvm::Constant *
GetAddrOfGlobal(GlobalDecl GD,ForDefinition_t IsForDefinition)3822 CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
3823 const Decl *D = GD.getDecl();
3824
3825 if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
3826 return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
3827 /*DontDefer=*/false, IsForDefinition);
3828
3829 if (isa<CXXMethodDecl>(D)) {
3830 auto FInfo =
3831 &getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(D));
3832 auto Ty = getTypes().GetFunctionType(*FInfo);
3833 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
3834 IsForDefinition);
3835 }
3836
3837 if (isa<FunctionDecl>(D)) {
3838 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3839 llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
3840 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
3841 IsForDefinition);
3842 }
3843
3844 return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr, IsForDefinition);
3845 }
3846
/// Create a constant global variable with the given name, type and linkage
/// for use by the C++ runtime machinery. If a global of the same name but a
/// different type already exists, it must be a declaration; it is replaced by
/// the new variable and all its uses are rewritten to point at it.
llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
    StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
    unsigned Alignment) {
  llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
  llvm::GlobalVariable *OldGV = nullptr;

  if (GV) {
    // Check if the variable has the right type.
    if (GV->getValueType() == Ty)
      return GV;

    // Because C++ name mangling, the only way we can end up with an already
    // existing global with the same name is if it has been declared extern "C".
    assert(GV->isDeclaration() && "Declaration has wrong type!");
    OldGV = GV;
  }

  // Create a new variable.
  GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
                                Linkage, nullptr, Name);

  if (OldGV) {
    // Replace occurrences of the old variable if needed.
    GV->takeName(OldGV);

    if (!OldGV->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
      OldGV->replaceAllUsesWith(NewPtrForOldDecl);
    }

    OldGV->eraseFromParent();
  }

  // Weak definitions get a comdat so duplicate copies from other TUs fold.
  if (supportsCOMDAT() && GV->isWeakForLinker() &&
      !GV->hasAvailableExternallyLinkage())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  GV->setAlignment(llvm::MaybeAlign(Alignment));

  return GV;
}
3889
3890 /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
3891 /// given global variable. If Ty is non-null and if the global doesn't exist,
3892 /// then it will be created with the specified type instead of whatever the
3893 /// normal requested type would be. If IsForDefinition is true, it is guaranteed
3894 /// that an actual global with type Ty will be returned, not conversion of a
3895 /// variable with the same mangled name but some other type.
GetAddrOfGlobalVar(const VarDecl * D,llvm::Type * Ty,ForDefinition_t IsForDefinition)3896 llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
3897 llvm::Type *Ty,
3898 ForDefinition_t IsForDefinition) {
3899 assert(D->hasGlobalStorage() && "Not a global variable");
3900 QualType ASTTy = D->getType();
3901 if (!Ty)
3902 Ty = getTypes().ConvertTypeForMem(ASTTy);
3903
3904 llvm::PointerType *PTy =
3905 llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
3906
3907 StringRef MangledName = getMangledName(D);
3908 return GetOrCreateLLVMGlobal(MangledName, PTy, D, IsForDefinition);
3909 }
3910
3911 /// CreateRuntimeVariable - Create a new runtime global variable with the
3912 /// specified type and name.
3913 llvm::Constant *
CreateRuntimeVariable(llvm::Type * Ty,StringRef Name)3914 CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
3915 StringRef Name) {
3916 auto PtrTy =
3917 getContext().getLangOpts().OpenCL
3918 ? llvm::PointerType::get(
3919 Ty, getContext().getTargetAddressSpace(LangAS::opencl_global))
3920 : llvm::PointerType::getUnqual(Ty);
3921 auto *Ret = GetOrCreateLLVMGlobal(Name, PtrTy, nullptr);
3922 setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
3923 return Ret;
3924 }
3925
EmitTentativeDefinition(const VarDecl * D)3926 void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
3927 assert(!D->getInit() && "Cannot emit definite definitions here!");
3928
3929 StringRef MangledName = getMangledName(D);
3930 llvm::GlobalValue *GV = GetGlobalValue(MangledName);
3931
3932 // We already have a definition, not declaration, with the same mangled name.
3933 // Emitting of declaration is not required (and actually overwrites emitted
3934 // definition).
3935 if (GV && !GV->isDeclaration())
3936 return;
3937
3938 // If we have not seen a reference to this variable yet, place it into the
3939 // deferred declarations table to be emitted if needed later.
3940 if (!MustBeEmitted(D) && !GV) {
3941 DeferredDecls[MangledName] = D;
3942 return;
3943 }
3944
3945 // The tentative definition is the only definition.
3946 EmitGlobalVarDefinition(D);
3947 }
3948
EmitExternalDeclaration(const VarDecl * D)3949 void CodeGenModule::EmitExternalDeclaration(const VarDecl *D) {
3950 EmitExternalVarDeclaration(D);
3951 }
3952
GetTargetTypeStoreSize(llvm::Type * Ty) const3953 CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
3954 return Context.toCharUnitsFromBits(
3955 getDataLayout().getTypeStoreSizeInBits(Ty));
3956 }
3957
GetGlobalVarAddressSpace(const VarDecl * D)3958 LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
3959 LangAS AddrSpace = LangAS::Default;
3960 if (LangOpts.OpenCL) {
3961 AddrSpace = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
3962 assert(AddrSpace == LangAS::opencl_global ||
3963 AddrSpace == LangAS::opencl_global_device ||
3964 AddrSpace == LangAS::opencl_global_host ||
3965 AddrSpace == LangAS::opencl_constant ||
3966 AddrSpace == LangAS::opencl_local ||
3967 AddrSpace >= LangAS::FirstTargetAddressSpace);
3968 return AddrSpace;
3969 }
3970
3971 if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
3972 if (D && D->hasAttr<CUDAConstantAttr>())
3973 return LangAS::cuda_constant;
3974 else if (D && D->hasAttr<CUDASharedAttr>())
3975 return LangAS::cuda_shared;
3976 else if (D && D->hasAttr<CUDADeviceAttr>())
3977 return LangAS::cuda_device;
3978 else if (D && D->getType().isConstQualified())
3979 return LangAS::cuda_constant;
3980 else
3981 return LangAS::cuda_device;
3982 }
3983
3984 if (LangOpts.OpenMP) {
3985 LangAS AS;
3986 if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(D, AS))
3987 return AS;
3988 }
3989 return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
3990 }
3991
getStringLiteralAddressSpace() const3992 LangAS CodeGenModule::getStringLiteralAddressSpace() const {
3993 // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
3994 if (LangOpts.OpenCL)
3995 return LangAS::opencl_constant;
3996 if (auto AS = getTarget().getConstantAddressSpace())
3997 return AS.getValue();
3998 return LangAS::Default;
3999 }
4000
4001 // In address space agnostic languages, string literals are in default address
4002 // space in AST. However, certain targets (e.g. amdgcn) request them to be
4003 // emitted in constant address space in LLVM IR. To be consistent with other
4004 // parts of AST, string literal global variables in constant address space
4005 // need to be casted to default address space before being put into address
4006 // map and referenced by other part of CodeGen.
4007 // In OpenCL, string literals are in constant address space in AST, therefore
4008 // they should not be casted to default address space.
4009 static llvm::Constant *
castStringLiteralToDefaultAddressSpace(CodeGenModule & CGM,llvm::GlobalVariable * GV)4010 castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
4011 llvm::GlobalVariable *GV) {
4012 llvm::Constant *Cast = GV;
4013 if (!CGM.getLangOpts().OpenCL) {
4014 if (auto AS = CGM.getTarget().getConstantAddressSpace()) {
4015 if (AS != LangAS::Default)
4016 Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
4017 CGM, GV, AS.getValue(), LangAS::Default,
4018 GV->getValueType()->getPointerTo(
4019 CGM.getContext().getTargetAddressSpace(LangAS::Default)));
4020 }
4021 }
4022 return Cast;
4023 }
4024
4025 template<typename SomeDecl>
MaybeHandleStaticInExternC(const SomeDecl * D,llvm::GlobalValue * GV)4026 void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
4027 llvm::GlobalValue *GV) {
4028 if (!getLangOpts().CPlusPlus)
4029 return;
4030
4031 // Must have 'used' attribute, or else inline assembly can't rely on
4032 // the name existing.
4033 if (!D->template hasAttr<UsedAttr>())
4034 return;
4035
4036 // Must have internal linkage and an ordinary name.
4037 if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
4038 return;
4039
4040 // Must be in an extern "C" context. Entities declared directly within
4041 // a record are not extern "C" even if the record is in such a context.
4042 const SomeDecl *First = D->getFirstDecl();
4043 if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
4044 return;
4045
4046 // OK, this is an internal linkage entity inside an extern "C" linkage
4047 // specification. Make a note of that so we can give it the "expected"
4048 // mangled name if nothing else is using that name.
4049 std::pair<StaticExternCMap::iterator, bool> R =
4050 StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
4051
4052 // If we have multiple internal linkage entities with the same name
4053 // in extern "C" regions, none of them gets that name.
4054 if (!R.second)
4055 R.first->second = nullptr;
4056 }
4057
shouldBeInCOMDAT(CodeGenModule & CGM,const Decl & D)4058 static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
4059 if (!CGM.supportsCOMDAT())
4060 return false;
4061
4062 // Do not set COMDAT attribute for CUDA/HIP stub functions to prevent
4063 // them being "merged" by the COMDAT Folding linker optimization.
4064 if (D.hasAttr<CUDAGlobalAttr>())
4065 return false;
4066
4067 if (D.hasAttr<SelectAnyAttr>())
4068 return true;
4069
4070 GVALinkage Linkage;
4071 if (auto *VD = dyn_cast<VarDecl>(&D))
4072 Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
4073 else
4074 Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
4075
4076 switch (Linkage) {
4077 case GVA_Internal:
4078 case GVA_AvailableExternally:
4079 case GVA_StrongExternal:
4080 return false;
4081 case GVA_DiscardableODR:
4082 case GVA_StrongODR:
4083 return true;
4084 }
4085 llvm_unreachable("No such linkage");
4086 }
4087
maybeSetTrivialComdat(const Decl & D,llvm::GlobalObject & GO)4088 void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
4089 llvm::GlobalObject &GO) {
4090 if (!shouldBeInCOMDAT(*this, D))
4091 return;
4092 GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
4093 }
4094
4095 /// Pass IsTentative as true if you want to create a tentative definition.
EmitGlobalVarDefinition(const VarDecl * D,bool IsTentative)4096 void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
4097 bool IsTentative) {
4098 // OpenCL global variables of sampler type are translated to function calls,
4099 // therefore no need to be translated.
4100 QualType ASTTy = D->getType();
4101 if (getLangOpts().OpenCL && ASTTy->isSamplerT())
4102 return;
4103
4104 // If this is OpenMP device, check if it is legal to emit this global
4105 // normally.
4106 if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
4107 OpenMPRuntime->emitTargetGlobalVariable(D))
4108 return;
4109
4110 llvm::Constant *Init = nullptr;
4111 bool NeedsGlobalCtor = false;
4112 bool NeedsGlobalDtor =
4113 D->needsDestruction(getContext()) == QualType::DK_cxx_destructor;
4114
4115 const VarDecl *InitDecl;
4116 const Expr *InitExpr = D->getAnyInitializer(InitDecl);
4117
4118 Optional<ConstantEmitter> emitter;
4119
4120 // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
4121 // as part of their declaration." Sema has already checked for
4122 // error cases, so we just need to set Init to UndefValue.
4123 bool IsCUDASharedVar =
4124 getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
4125 // Shadows of initialized device-side global variables are also left
4126 // undefined.
4127 bool IsCUDAShadowVar =
4128 !getLangOpts().CUDAIsDevice &&
4129 (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
4130 D->hasAttr<CUDASharedAttr>());
4131 bool IsCUDADeviceShadowVar =
4132 getLangOpts().CUDAIsDevice &&
4133 (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
4134 D->getType()->isCUDADeviceBuiltinTextureType());
4135 // HIP pinned shadow of initialized host-side global variables are also
4136 // left undefined.
4137 if (getLangOpts().CUDA &&
4138 (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
4139 Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
4140 else if (D->hasAttr<LoaderUninitializedAttr>())
4141 Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
4142 else if (!InitExpr) {
4143 // This is a tentative definition; tentative definitions are
4144 // implicitly initialized with { 0 }.
4145 //
4146 // Note that tentative definitions are only emitted at the end of
4147 // a translation unit, so they should never have incomplete
4148 // type. In addition, EmitTentativeDefinition makes sure that we
4149 // never attempt to emit a tentative definition if a real one
4150 // exists. A use may still exists, however, so we still may need
4151 // to do a RAUW.
4152 assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
4153 Init = EmitNullConstant(D->getType());
4154 } else {
4155 initializedGlobalDecl = GlobalDecl(D);
4156 emitter.emplace(*this);
4157 Init = emitter->tryEmitForInitializer(*InitDecl);
4158
4159 if (!Init) {
4160 QualType T = InitExpr->getType();
4161 if (D->getType()->isReferenceType())
4162 T = D->getType();
4163
4164 if (getLangOpts().CPlusPlus) {
4165 Init = EmitNullConstant(T);
4166 NeedsGlobalCtor = true;
4167 } else {
4168 ErrorUnsupported(D, "static initializer");
4169 Init = llvm::UndefValue::get(getTypes().ConvertType(T));
4170 }
4171 } else {
4172 // We don't need an initializer, so remove the entry for the delayed
4173 // initializer position (just in case this entry was delayed) if we
4174 // also don't need to register a destructor.
4175 if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
4176 DelayedCXXInitPosition.erase(D);
4177 }
4178 }
4179
4180 llvm::Type* InitType = Init->getType();
4181 llvm::Constant *Entry =
4182 GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative));
4183
4184 // Strip off pointer casts if we got them.
4185 Entry = Entry->stripPointerCasts();
4186
4187 // Entry is now either a Function or GlobalVariable.
4188 auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);
4189
4190 // We have a definition after a declaration with the wrong type.
4191 // We must make a new GlobalVariable* and update everything that used OldGV
4192 // (a declaration or tentative definition) with the new GlobalVariable*
4193 // (which will be a definition).
4194 //
4195 // This happens if there is a prototype for a global (e.g.
4196 // "extern int x[];") and then a definition of a different type (e.g.
4197 // "int x[10];"). This also happens when an initializer has a different type
4198 // from the type of the global (this happens with unions).
4199 if (!GV || GV->getValueType() != InitType ||
4200 GV->getType()->getAddressSpace() !=
4201 getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {
4202
4203 // Move the old entry aside so that we'll create a new one.
4204 Entry->setName(StringRef());
4205
4206 // Make a new global with the correct type, this is now guaranteed to work.
4207 GV = cast<llvm::GlobalVariable>(
4208 GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative))
4209 ->stripPointerCasts());
4210
4211 // Replace all uses of the old global with the new global
4212 llvm::Constant *NewPtrForOldDecl =
4213 llvm::ConstantExpr::getBitCast(GV, Entry->getType());
4214 Entry->replaceAllUsesWith(NewPtrForOldDecl);
4215
4216 // Erase the old global, since it is no longer used.
4217 cast<llvm::GlobalValue>(Entry)->eraseFromParent();
4218 }
4219
4220 MaybeHandleStaticInExternC(D, GV);
4221
4222 if (D->hasAttr<AnnotateAttr>())
4223 AddGlobalAnnotations(D, GV);
4224
4225 // Set the llvm linkage type as appropriate.
4226 llvm::GlobalValue::LinkageTypes Linkage =
4227 getLLVMLinkageVarDefinition(D, GV->isConstant());
4228
4229 // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
4230 // the device. [...]"
4231 // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
4232 // __device__, declares a variable that: [...]
4233 // Is accessible from all the threads within the grid and from the host
4234 // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
4235 // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
4236 if (GV && LangOpts.CUDA) {
4237 if (LangOpts.CUDAIsDevice) {
4238 if (Linkage != llvm::GlobalValue::InternalLinkage &&
4239 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()))
4240 GV->setExternallyInitialized(true);
4241 } else {
4242 // Host-side shadows of external declarations of device-side
4243 // global variables become internal definitions. These have to
4244 // be internal in order to prevent name conflicts with global
4245 // host variables with the same name in a different TUs.
4246 if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
4247 Linkage = llvm::GlobalValue::InternalLinkage;
4248 // Shadow variables and their properties must be registered with CUDA
4249 // runtime. Skip Extern global variables, which will be registered in
4250 // the TU where they are defined.
4251 //
4252 // Don't register a C++17 inline variable. The local symbol can be
4253 // discarded and referencing a discarded local symbol from outside the
4254 // comdat (__cuda_register_globals) is disallowed by the ELF spec.
4255 // TODO: Reject __device__ constexpr and __device__ inline in Sema.
4256 if (!D->hasExternalStorage() && !D->isInline())
4257 getCUDARuntime().registerDeviceVar(D, *GV, !D->hasDefinition(),
4258 D->hasAttr<CUDAConstantAttr>());
4259 } else if (D->hasAttr<CUDASharedAttr>()) {
4260 // __shared__ variables are odd. Shadows do get created, but
4261 // they are not registered with the CUDA runtime, so they
4262 // can't really be used to access their device-side
4263 // counterparts. It's not clear yet whether it's nvcc's bug or
4264 // a feature, but we've got to do the same for compatibility.
4265 Linkage = llvm::GlobalValue::InternalLinkage;
4266 } else if (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
4267 D->getType()->isCUDADeviceBuiltinTextureType()) {
4268 // Builtin surfaces and textures and their template arguments are
4269 // also registered with CUDA runtime.
4270 Linkage = llvm::GlobalValue::InternalLinkage;
4271 const ClassTemplateSpecializationDecl *TD =
4272 cast<ClassTemplateSpecializationDecl>(
4273 D->getType()->getAs<RecordType>()->getDecl());
4274 const TemplateArgumentList &Args = TD->getTemplateArgs();
4275 if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
4276 assert(Args.size() == 2 &&
4277 "Unexpected number of template arguments of CUDA device "
4278 "builtin surface type.");
4279 auto SurfType = Args[1].getAsIntegral();
4280 if (!D->hasExternalStorage())
4281 getCUDARuntime().registerDeviceSurf(D, *GV, !D->hasDefinition(),
4282 SurfType.getSExtValue());
4283 } else {
4284 assert(Args.size() == 3 &&
4285 "Unexpected number of template arguments of CUDA device "
4286 "builtin texture type.");
4287 auto TexType = Args[1].getAsIntegral();
4288 auto Normalized = Args[2].getAsIntegral();
4289 if (!D->hasExternalStorage())
4290 getCUDARuntime().registerDeviceTex(D, *GV, !D->hasDefinition(),
4291 TexType.getSExtValue(),
4292 Normalized.getZExtValue());
4293 }
4294 }
4295 }
4296 }
4297
4298 GV->setInitializer(Init);
4299 if (emitter)
4300 emitter->finalize(GV);
4301
4302 // If it is safe to mark the global 'constant', do so now.
4303 GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
4304 isTypeConstant(D->getType(), true));
4305
4306 // If it is in a read-only section, mark it 'constant'.
4307 if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
4308 const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
4309 if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
4310 GV->setConstant(true);
4311 }
4312
4313 GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
4314
4315 // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
4316 // function is only defined alongside the variable, not also alongside
4317 // callers. Normally, all accesses to a thread_local go through the
4318 // thread-wrapper in order to ensure initialization has occurred, underlying
4319 // variable will never be used other than the thread-wrapper, so it can be
4320 // converted to internal linkage.
4321 //
4322 // However, if the variable has the 'constinit' attribute, it _can_ be
4323 // referenced directly, without calling the thread-wrapper, so the linkage
4324 // must not be changed.
4325 //
4326 // Additionally, if the variable isn't plain external linkage, e.g. if it's
4327 // weak or linkonce, the de-duplication semantics are important to preserve,
4328 // so we don't change the linkage.
4329 if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
4330 Linkage == llvm::GlobalValue::ExternalLinkage &&
4331 Context.getTargetInfo().getTriple().isOSDarwin() &&
4332 !D->hasAttr<ConstInitAttr>())
4333 Linkage = llvm::GlobalValue::InternalLinkage;
4334
4335 GV->setLinkage(Linkage);
4336 if (D->hasAttr<DLLImportAttr>())
4337 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
4338 else if (D->hasAttr<DLLExportAttr>())
4339 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
4340 else
4341 GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
4342
4343 if (Linkage == llvm::GlobalVariable::CommonLinkage) {
4344 // common vars aren't constant even if declared const.
4345 GV->setConstant(false);
4346 // Tentative definition of global variables may be initialized with
4347 // non-zero null pointers. In this case they should have weak linkage
4348 // since common linkage must have zero initializer and must not have
4349 // explicit section therefore cannot have non-zero initial value.
4350 if (!GV->getInitializer()->isNullValue())
4351 GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
4352 }
4353
4354 setNonAliasAttributes(D, GV);
4355
4356 if (D->getTLSKind() && !GV->isThreadLocal()) {
4357 if (D->getTLSKind() == VarDecl::TLS_Dynamic)
4358 CXXThreadLocals.push_back(D);
4359 setTLSMode(GV, *D);
4360 }
4361
4362 maybeSetTrivialComdat(*D, *GV);
4363
4364 // Emit the initializer function if necessary.
4365 if (NeedsGlobalCtor || NeedsGlobalDtor)
4366 EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
4367
4368 SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);
4369
4370 // Emit global variable debug information.
4371 if (CGDebugInfo *DI = getModuleDebugInfo())
4372 if (getCodeGenOpts().hasReducedDebugInfo())
4373 DI->EmitGlobalVariable(GV, D);
4374 }
4375
EmitExternalVarDeclaration(const VarDecl * D)4376 void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) {
4377 if (CGDebugInfo *DI = getModuleDebugInfo())
4378 if (getCodeGenOpts().hasReducedDebugInfo()) {
4379 QualType ASTTy = D->getType();
4380 llvm::Type *Ty = getTypes().ConvertTypeForMem(D->getType());
4381 llvm::PointerType *PTy =
4382 llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
4383 llvm::Constant *GV = GetOrCreateLLVMGlobal(D->getName(), PTy, D);
4384 DI->EmitExternalVariable(
4385 cast<llvm::GlobalVariable>(GV->stripPointerCasts()), D);
4386 }
4387 }
4388
isVarDeclStrongDefinition(const ASTContext & Context,CodeGenModule & CGM,const VarDecl * D,bool NoCommon)4389 static bool isVarDeclStrongDefinition(const ASTContext &Context,
4390 CodeGenModule &CGM, const VarDecl *D,
4391 bool NoCommon) {
4392 // Don't give variables common linkage if -fno-common was specified unless it
4393 // was overridden by a NoCommon attribute.
4394 if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
4395 return true;
4396
4397 // C11 6.9.2/2:
4398 // A declaration of an identifier for an object that has file scope without
4399 // an initializer, and without a storage-class specifier or with the
4400 // storage-class specifier static, constitutes a tentative definition.
4401 if (D->getInit() || D->hasExternalStorage())
4402 return true;
4403
4404 // A variable cannot be both common and exist in a section.
4405 if (D->hasAttr<SectionAttr>())
4406 return true;
4407
4408 // A variable cannot be both common and exist in a section.
4409 // We don't try to determine which is the right section in the front-end.
4410 // If no specialized section name is applicable, it will resort to default.
4411 if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
4412 D->hasAttr<PragmaClangDataSectionAttr>() ||
4413 D->hasAttr<PragmaClangRelroSectionAttr>() ||
4414 D->hasAttr<PragmaClangRodataSectionAttr>())
4415 return true;
4416
4417 // Thread local vars aren't considered common linkage.
4418 if (D->getTLSKind())
4419 return true;
4420
4421 // Tentative definitions marked with WeakImportAttr are true definitions.
4422 if (D->hasAttr<WeakImportAttr>())
4423 return true;
4424
4425 // A variable cannot be both common and exist in a comdat.
4426 if (shouldBeInCOMDAT(CGM, *D))
4427 return true;
4428
4429 // Declarations with a required alignment do not have common linkage in MSVC
4430 // mode.
4431 if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
4432 if (D->hasAttr<AlignedAttr>())
4433 return true;
4434 QualType VarType = D->getType();
4435 if (Context.isAlignmentRequired(VarType))
4436 return true;
4437
4438 if (const auto *RT = VarType->getAs<RecordType>()) {
4439 const RecordDecl *RD = RT->getDecl();
4440 for (const FieldDecl *FD : RD->fields()) {
4441 if (FD->isBitField())
4442 continue;
4443 if (FD->hasAttr<AlignedAttr>())
4444 return true;
4445 if (Context.isAlignmentRequired(FD->getType()))
4446 return true;
4447 }
4448 }
4449 }
4450
4451 // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
4452 // common symbols, so symbols with greater alignment requirements cannot be
4453 // common.
4454 // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
4455 // alignments for common symbols via the aligncomm directive, so this
4456 // restriction only applies to MSVC environments.
4457 if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
4458 Context.getTypeAlignIfKnown(D->getType()) >
4459 Context.toBits(CharUnits::fromQuantity(32)))
4460 return true;
4461
4462 return false;
4463 }
4464
getLLVMLinkageForDeclarator(const DeclaratorDecl * D,GVALinkage Linkage,bool IsConstantVariable)4465 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
4466 const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
4467 if (Linkage == GVA_Internal)
4468 return llvm::Function::InternalLinkage;
4469
4470 if (D->hasAttr<WeakAttr>()) {
4471 if (IsConstantVariable)
4472 return llvm::GlobalVariable::WeakODRLinkage;
4473 else
4474 return llvm::GlobalVariable::WeakAnyLinkage;
4475 }
4476
4477 if (const auto *FD = D->getAsFunction())
4478 if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
4479 return llvm::GlobalVariable::LinkOnceAnyLinkage;
4480
4481 // We are guaranteed to have a strong definition somewhere else,
4482 // so we can use available_externally linkage.
4483 if (Linkage == GVA_AvailableExternally)
4484 return llvm::GlobalValue::AvailableExternallyLinkage;
4485
4486 // Note that Apple's kernel linker doesn't support symbol
4487 // coalescing, so we need to avoid linkonce and weak linkages there.
4488 // Normally, this means we just map to internal, but for explicit
4489 // instantiations we'll map to external.
4490
4491 // In C++, the compiler has to emit a definition in every translation unit
4492 // that references the function. We should use linkonce_odr because
4493 // a) if all references in this translation unit are optimized away, we
4494 // don't need to codegen it. b) if the function persists, it needs to be
4495 // merged with other definitions. c) C++ has the ODR, so we know the
4496 // definition is dependable.
4497 if (Linkage == GVA_DiscardableODR)
4498 return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
4499 : llvm::Function::InternalLinkage;
4500
4501 // An explicit instantiation of a template has weak linkage, since
4502 // explicit instantiations can occur in multiple translation units
4503 // and must all be equivalent. However, we are not allowed to
4504 // throw away these explicit instantiations.
4505 //
4506 // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU,
4507 // so say that CUDA templates are either external (for kernels) or internal.
4508 // This lets llvm perform aggressive inter-procedural optimizations. For
4509 // -fgpu-rdc case, device function calls across multiple TU's are allowed,
4510 // therefore we need to follow the normal linkage paradigm.
4511 if (Linkage == GVA_StrongODR) {
4512 if (getLangOpts().AppleKext)
4513 return llvm::Function::ExternalLinkage;
4514 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
4515 !getLangOpts().GPURelocatableDeviceCode)
4516 return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
4517 : llvm::Function::InternalLinkage;
4518 return llvm::Function::WeakODRLinkage;
4519 }
4520
4521 // C++ doesn't have tentative definitions and thus cannot have common
4522 // linkage.
4523 if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
4524 !isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
4525 CodeGenOpts.NoCommon))
4526 return llvm::GlobalVariable::CommonLinkage;
4527
4528 // selectany symbols are externally visible, so use weak instead of
4529 // linkonce. MSVC optimizes away references to const selectany globals, so
4530 // all definitions should be the same and ODR linkage should be used.
4531 // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
4532 if (D->hasAttr<SelectAnyAttr>())
4533 return llvm::GlobalVariable::WeakODRLinkage;
4534
4535 // Otherwise, we have strong external linkage.
4536 assert(Linkage == GVA_StrongExternal);
4537 return llvm::GlobalVariable::ExternalLinkage;
4538 }
4539
getLLVMLinkageVarDefinition(const VarDecl * VD,bool IsConstant)4540 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
4541 const VarDecl *VD, bool IsConstant) {
4542 GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
4543 return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
4544 }
4545
4546 /// Replace the uses of a function that was declared with a non-proto type.
4547 /// We want to silently drop extra arguments from call sites
replaceUsesOfNonProtoConstant(llvm::Constant * old,llvm::Function * newFn)4548 static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
4549 llvm::Function *newFn) {
4550 // Fast path.
4551 if (old->use_empty()) return;
4552
4553 llvm::Type *newRetTy = newFn->getReturnType();
4554 SmallVector<llvm::Value*, 4> newArgs;
4555 SmallVector<llvm::OperandBundleDef, 1> newBundles;
4556
4557 for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
4558 ui != ue; ) {
4559 llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
4560 llvm::User *user = use->getUser();
4561
4562 // Recognize and replace uses of bitcasts. Most calls to
4563 // unprototyped functions will use bitcasts.
4564 if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(user)) {
4565 if (bitcast->getOpcode() == llvm::Instruction::BitCast)
4566 replaceUsesOfNonProtoConstant(bitcast, newFn);
4567 continue;
4568 }
4569
4570 // Recognize calls to the function.
4571 llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
4572 if (!callSite) continue;
4573 if (!callSite->isCallee(&*use))
4574 continue;
4575
4576 // If the return types don't match exactly, then we can't
4577 // transform this call unless it's dead.
4578 if (callSite->getType() != newRetTy && !callSite->use_empty())
4579 continue;
4580
4581 // Get the call site's attribute list.
4582 SmallVector<llvm::AttributeSet, 8> newArgAttrs;
4583 llvm::AttributeList oldAttrs = callSite->getAttributes();
4584
4585 // If the function was passed too few arguments, don't transform.
4586 unsigned newNumArgs = newFn->arg_size();
4587 if (callSite->arg_size() < newNumArgs)
4588 continue;
4589
4590 // If extra arguments were passed, we silently drop them.
4591 // If any of the types mismatch, we don't transform.
4592 unsigned argNo = 0;
4593 bool dontTransform = false;
4594 for (llvm::Argument &A : newFn->args()) {
4595 if (callSite->getArgOperand(argNo)->getType() != A.getType()) {
4596 dontTransform = true;
4597 break;
4598 }
4599
4600 // Add any parameter attributes.
4601 newArgAttrs.push_back(oldAttrs.getParamAttributes(argNo));
4602 argNo++;
4603 }
4604 if (dontTransform)
4605 continue;
4606
4607 // Okay, we can transform this. Create the new call instruction and copy
4608 // over the required information.
4609 newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
4610
4611 // Copy over any operand bundles.
4612 callSite->getOperandBundlesAsDefs(newBundles);
4613
4614 llvm::CallBase *newCall;
4615 if (dyn_cast<llvm::CallInst>(callSite)) {
4616 newCall =
4617 llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
4618 } else {
4619 auto *oldInvoke = cast<llvm::InvokeInst>(callSite);
4620 newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(),
4621 oldInvoke->getUnwindDest(), newArgs,
4622 newBundles, "", callSite);
4623 }
4624 newArgs.clear(); // for the next iteration
4625
4626 if (!newCall->getType()->isVoidTy())
4627 newCall->takeName(callSite);
4628 newCall->setAttributes(llvm::AttributeList::get(
4629 newFn->getContext(), oldAttrs.getFnAttributes(),
4630 oldAttrs.getRetAttributes(), newArgAttrs));
4631 newCall->setCallingConv(callSite->getCallingConv());
4632
4633 // Finally, remove the old call, replacing any uses with the new one.
4634 if (!callSite->use_empty())
4635 callSite->replaceAllUsesWith(newCall);
4636
4637 // Copy debug location attached to CI.
4638 if (callSite->getDebugLoc())
4639 newCall->setDebugLoc(callSite->getDebugLoc());
4640
4641 callSite->eraseFromParent();
4642 }
4643 }
4644
4645 /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
4646 /// implement a function with no prototype, e.g. "int foo() {}". If there are
4647 /// existing call uses of the old function in the module, this adjusts them to
4648 /// call the new function directly.
4649 ///
4650 /// This is not just a cleanup: the always_inline pass requires direct calls to
4651 /// functions to be able to inline them. If there is a bitcast in the way, it
4652 /// won't inline them. Instcombine normally deletes these calls, but it isn't
4653 /// run at -O0.
ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue * Old,llvm::Function * NewFn)4654 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
4655 llvm::Function *NewFn) {
4656 // If we're redefining a global as a function, don't transform it.
4657 if (!isa<llvm::Function>(Old)) return;
4658
4659 replaceUsesOfNonProtoConstant(Old, NewFn);
4660 }
4661
HandleCXXStaticMemberVarInstantiation(VarDecl * VD)4662 void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
4663 auto DK = VD->isThisDeclarationADefinition();
4664 if (DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>())
4665 return;
4666
4667 TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
4668 // If we have a definition, this might be a deferred decl. If the
4669 // instantiation is explicit, make sure we emit it at the end.
4670 if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
4671 GetAddrOfGlobalVar(VD);
4672
4673 EmitTopLevelDecl(VD);
4674 }
4675
/// Emit the definition of the function GD into GV. If GV is null or has the
/// wrong LLVM type, the correct llvm::Function is (re)fetched first. Sets
/// linkage/visibility before body generation, generates the body, then
/// applies definition attributes and registers ctor/dtor/annotation
/// attributes.
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
                                                 llvm::GlobalValue *GV) {
  const auto *D = cast<FunctionDecl>(GD.getDecl());

  // Compute the function info and LLVM type.
  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);

  // Get or create the prototype for the function. If the existing global's
  // value type doesn't match the ABI-arranged type, re-fetch it for
  // definition so we get a correctly-typed function.
  if (!GV || (GV->getValueType() != Ty))
    GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
                                                   /*DontDefer=*/true,
                                                   ForDefinition));

  // Already emitted.
  if (!GV->isDeclaration())
    return;

  // We need to set linkage and visibility on the function before
  // generating code for it because various parts of IR generation
  // want to propagate this information down (e.g. to local static
  // declarations).
  auto *Fn = cast<llvm::Function>(GV);
  setFunctionLinkage(GD, Fn);

  // FIXME: this is redundant with part of setFunctionDefinitionAttributes
  setGVProperties(Fn, GD);

  MaybeHandleStaticInExternC(D, Fn);

  maybeSetTrivialComdat(*D, *Fn);

  // Set CodeGen attributes that represent floating point environment.
  setLLVMFunctionFEnvAttributes(D, Fn);

  // Generate the actual function body.
  CodeGenFunction(*this).GenerateCode(GD, Fn, FI);

  // Definition-time attributes are applied after body generation.
  setNonAliasAttributes(GD, Fn);
  SetLLVMFunctionAttributesForDefinition(D, Fn);

  // Register constructor/destructor priorities and annotations declared via
  // attributes on the function.
  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
    AddGlobalCtor(Fn, CA->getPriority());
  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
    AddGlobalDtor(Fn, DA->getPriority());
  if (D->hasAttr<AnnotateAttr>())
    AddGlobalAnnotations(D, Fn);
}
4723
/// Emit a definition for a declaration carrying an 'alias' attribute:
/// create the llvm::GlobalAlias for GD, resolving its aliasee, and fold any
/// pre-existing declaration of the same mangled name into the new alias.
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(GD.getDecl());
  const AliasAttr *AA = D->getAttr<AliasAttr>();
  assert(AA && "Not an alias?");

  StringRef MangledName = getMangledName(GD);

  // An alias that names itself is a trivial cycle; diagnose and bail.
  if (AA->getAliasee() == MangledName) {
    Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
    return;
  }

  // If there is a definition in the module, then it wins over the alias.
  // This is dubious, but allow it to be safe. Just ignore the alias.
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  Aliases.push_back(GD);

  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());

  // Create a reference to the named value. This ensures that it is emitted
  // if a deferred decl.
  llvm::Constant *Aliasee;
  llvm::GlobalValue::LinkageTypes LT;
  if (isa<llvm::FunctionType>(DeclTy)) {
    Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
                                      /*ForVTable=*/false);
    LT = getFunctionLinkage(GD);
  } else {
    // Variable alias: reference the aliasee as a global, and compute linkage
    // from the variable definition when possible.
    Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
                                    llvm::PointerType::getUnqual(DeclTy),
                                    /*D=*/nullptr);
    if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
      LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
    else
      LT = getFunctionLinkage(GD);
  }

  // Create the new alias itself, but don't set a name yet.
  unsigned AS = Aliasee->getType()->getPointerAddressSpace();
  auto *GA =
      llvm::GlobalAlias::create(DeclTy, AS, LT, "", Aliasee, &getModule());

  if (Entry) {
    // If the alias would end up aliasing the prior declaration of the same
    // symbol, that is also a cycle.
    if (GA->getAliasee() == Entry) {
      Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
      return;
    }

    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the alias, as in:
    //   extern int test6();
    //   ...
    //   int test6() __attribute__((alias("test7")));
    //
    // Remove it and replace uses of it with the alias.
    GA->takeName(Entry);

    Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
                                                             Entry->getType()));
    Entry->eraseFromParent();
  } else {
    GA->setName(MangledName);
  }

  // Set attributes which are particular to an alias; this is a
  // specialization of the attributes which may be set on a global
  // variable/function.
  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
      D->isWeakImported()) {
    GA->setLinkage(llvm::Function::WeakAnyLinkage);
  }

  if (const auto *VD = dyn_cast<VarDecl>(D))
    if (VD->getTLSKind())
      setTLSMode(GA, *VD);

  SetCommonAttributes(GD, GA);
}
4807
/// Emit a definition for a declaration carrying an 'ifunc' attribute:
/// create the llvm::GlobalIFunc for GD with its resolver, and fold any
/// pre-existing declaration of the same mangled name into it.
void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(GD.getDecl());
  const IFuncAttr *IFA = D->getAttr<IFuncAttr>();
  assert(IFA && "Not an ifunc?");

  StringRef MangledName = getMangledName(GD);

  // An ifunc whose resolver is itself is a trivial cycle; diagnose and bail.
  if (IFA->getResolver() == MangledName) {
    Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
    return;
  }

  // Report an error if some definition overrides ifunc.
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration()) {
    GlobalDecl OtherGD;
    // Only diagnose once per conflicting GlobalDecl.
    if (lookupRepresentativeDecl(MangledName, OtherGD) &&
        DiagnosedConflictingDefinitions.insert(GD).second) {
      Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name)
          << MangledName;
      Diags.Report(OtherGD.getDecl()->getLocation(),
                   diag::note_previous_definition);
    }
    return;
  }

  Aliases.push_back(GD);

  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
  // Reference (and thereby force emission of) the resolver function.
  llvm::Constant *Resolver =
      GetOrCreateLLVMFunction(IFA->getResolver(), DeclTy, GD,
                              /*ForVTable=*/false);
  llvm::GlobalIFunc *GIF =
      llvm::GlobalIFunc::create(DeclTy, 0, llvm::Function::ExternalLinkage,
                                "", Resolver, &getModule());
  if (Entry) {
    // If the resolver is the prior declaration of the same symbol, that is
    // also a cycle.
    if (GIF->getResolver() == Entry) {
      Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
      return;
    }
    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the ifunc, as in:
    //   extern int test();
    //   ...
    //   int test() __attribute__((ifunc("resolver")));
    //
    // Remove it and replace uses of it with the ifunc.
    GIF->takeName(Entry);

    Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GIF,
                                                             Entry->getType()));
    Entry->eraseFromParent();
  } else
    GIF->setName(MangledName);

  SetCommonAttributes(GD, GIF);
}
4867
getIntrinsic(unsigned IID,ArrayRef<llvm::Type * > Tys)4868 llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
4869 ArrayRef<llvm::Type*> Tys) {
4870 return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
4871 Tys);
4872 }
4873
4874 static llvm::StringMapEntry<llvm::GlobalVariable *> &
GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable * > & Map,const StringLiteral * Literal,bool TargetIsLSB,bool & IsUTF16,unsigned & StringLength)4875 GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
4876 const StringLiteral *Literal, bool TargetIsLSB,
4877 bool &IsUTF16, unsigned &StringLength) {
4878 StringRef String = Literal->getString();
4879 unsigned NumBytes = String.size();
4880
4881 // Check for simple case.
4882 if (!Literal->containsNonAsciiOrNull()) {
4883 StringLength = NumBytes;
4884 return *Map.insert(std::make_pair(String, nullptr)).first;
4885 }
4886
4887 // Otherwise, convert the UTF8 literals into a string of shorts.
4888 IsUTF16 = true;
4889
4890 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
4891 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
4892 llvm::UTF16 *ToPtr = &ToBuf[0];
4893
4894 (void)llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
4895 ToPtr + NumBytes, llvm::strictConversion);
4896
4897 // ConvertUTF8toUTF16 returns the length in ToPtr.
4898 StringLength = ToPtr - &ToBuf[0];
4899
4900 // Add an explicit null.
4901 *ToPtr = 0;
4902 return *Map.insert(std::make_pair(
4903 StringRef(reinterpret_cast<const char *>(ToBuf.data()),
4904 (StringLength + 1) * 2),
4905 nullptr)).first;
4906 }
4907
/// Return the address of a constant CFString/NSCFConstantString struct for
/// the given literal, creating and caching it on first use. Handles both the
/// classic CoreFoundation ABI and the Swift CF ABIs, ASCII and UTF-16
/// backing stores, and per-object-format section placement.
ConstantAddress
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
  unsigned StringLength = 0;
  bool isUTF16 = false;
  llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
    GetConstantCFStringEntry(CFConstantStringMap, Literal,
                             getDataLayout().isLittleEndian(), isUTF16,
                             StringLength);

  // Already emitted this literal: reuse the cached global.
  if (auto *C = Entry.second)
    return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));

  llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
  llvm::Constant *Zeros[] = { Zero, Zero };

  const ASTContext &Context = getContext();
  const llvm::Triple &Triple = getTriple();

  const auto CFRuntime = getLangOpts().CFRuntime;
  // Swift and all later Swift-versioned ABIs are "Swift ABI" here; the enum
  // values from Swift onward are ordered, so compare numerically.
  const bool IsSwiftABI =
      static_cast<unsigned>(CFRuntime) >=
      static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
  const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;

  // If we don't already have it, get __CFConstantStringClassReference.
  if (!CFConstantStringClassRef) {
    const char *CFConstantStringClassName = "__CFConstantStringClassReference";
    llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
    Ty = llvm::ArrayType::get(Ty, 0);

    // For the Swift CF runtimes, the class symbol is a mangled Swift symbol
    // whose mangling prefix depends on the Swift version, and the reference
    // is pointer-sized rather than an array.
    switch (CFRuntime) {
    default: break;
    case LangOptions::CoreFoundationABI::Swift: LLVM_FALLTHROUGH;
    case LangOptions::CoreFoundationABI::Swift5_0:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
                              : "$s10Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    case LangOptions::CoreFoundationABI::Swift4_2:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
                              : "$S10Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    case LangOptions::CoreFoundationABI::Swift4_1:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
                              : "__T010Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    }

    llvm::Constant *C = CreateRuntimeVariable(Ty, CFConstantStringClassName);

    if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
      llvm::GlobalValue *GV = nullptr;

      if ((GV = dyn_cast<llvm::GlobalValue>(C))) {
        IdentifierInfo &II = Context.Idents.get(GV->getName());
        TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
        DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);

        // Look for a declaration of the class symbol in the TU so we can
        // honor its dllexport/dllimport status on COFF.
        const VarDecl *VD = nullptr;
        for (const auto &Result : DC->lookup(&II))
          if ((VD = dyn_cast<VarDecl>(Result)))
            break;

        if (Triple.isOSBinFormatELF()) {
          if (!VD)
            GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
        } else {
          GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
          if (!VD || !VD->hasAttr<DLLExportAttr>())
            GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
          else
            GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
        }

        setDSOLocal(GV);
      }
    }

    // Decay array -> ptr (or convert to an integer for the Swift ABI).
    CFConstantStringClassRef =
        IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty)
                   : llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
  }

  QualType CFTy = Context.getCFConstantStringType();

  auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));

  ConstantInitBuilder Builder(*this);
  auto Fields = Builder.beginStruct(STy);

  // Class pointer.
  Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));

  // Flags. 0x07c8 marks an ASCII store, 0x07d0 a UTF-16 store; the Swift ABI
  // adds a leading pointer-sized field before a 64-bit flags word.
  if (IsSwiftABI) {
    Fields.addInt(IntPtrTy, IsSwift4_1 ? 0x05 : 0x01);
    Fields.addInt(Int64Ty, isUTF16 ? 0x07d0 : 0x07c8);
  } else {
    Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
  }

  // String pointer: build the backing-store data from the map key, which is
  // either the raw ASCII bytes or the raw bytes of the UTF-16 buffer.
  llvm::Constant *C = nullptr;
  if (isUTF16) {
    auto Arr = llvm::makeArrayRef(
        reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
        Entry.first().size() / 2);
    C = llvm::ConstantDataArray::get(VMContext, Arr);
  } else {
    C = llvm::ConstantDataArray::getString(VMContext, Entry.first());
  }

  // Note: -fwritable-strings doesn't make the backing store strings of
  // CFStrings writable. (See <rdar://problem/10657500>)
  auto *GV =
      new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
                               llvm::GlobalValue::PrivateLinkage, C, ".str");
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  // Don't enforce the target's minimum global alignment, since the only use
  // of the string is via this class initializer.
  CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(Context.ShortTy)
                            : Context.getTypeAlignInChars(Context.CharTy);
  GV->setAlignment(Align.getAsAlign());

  // FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
  // Without it LLVM can merge the string with a non unnamed_addr one during
  // LTO. Doing that changes the section it ends in, which surprises ld64.
  if (Triple.isOSBinFormatMachO())
    GV->setSection(isUTF16 ? "__TEXT,__ustring"
                           : "__TEXT,__cstring,cstring_literals");
  // Make sure the literal ends up in .rodata to allow for safe ICF and for
  // the static linker to adjust permissions to read-only later on.
  else if (Triple.isOSBinFormatELF())
    GV->setSection(".rodata");

  // String.
  llvm::Constant *Str =
      llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);

  if (isUTF16)
    // Cast the UTF16 string to the correct type.
    Str = llvm::ConstantExpr::getBitCast(Str, Int8PtrTy);
  Fields.add(Str);

  // String length, in code units. The integer type matches the ABI: 'long'
  // classically, 32-bit for Swift 4.1/4.2, pointer-sized for later Swift.
  llvm::IntegerType *LengthTy =
      llvm::IntegerType::get(getModule().getContext(),
                             Context.getTargetInfo().getLongWidth());
  if (IsSwiftABI) {
    if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
        CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
      LengthTy = Int32Ty;
    else
      LengthTy = IntPtrTy;
  }
  Fields.addInt(LengthTy, StringLength);

  // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is
  // properly aligned on 32-bit platforms.
  CharUnits Alignment =
      IsSwiftABI ? Context.toCharUnitsFromBits(64) : getPointerAlign();

  // The struct.
  GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
                                    /*isConstant=*/false,
                                    llvm::GlobalVariable::PrivateLinkage);
  GV->addAttribute("objc_arc_inert");
  // Place the struct in the object-format-appropriate cfstring section.
  switch (Triple.getObjectFormat()) {
  case llvm::Triple::UnknownObjectFormat:
    llvm_unreachable("unknown file format");
  case llvm::Triple::GOFF:
    llvm_unreachable("GOFF is not yet implemented");
  case llvm::Triple::XCOFF:
    llvm_unreachable("XCOFF is not yet implemented");
  case llvm::Triple::COFF:
  case llvm::Triple::ELF:
  case llvm::Triple::Wasm:
    GV->setSection("cfstring");
    break;
  case llvm::Triple::MachO:
    GV->setSection("__DATA,__cfstring");
    break;
  }
  Entry.second = GV;

  return ConstantAddress(GV, Alignment);
}
5101
getExpressionLocationsEnabled() const5102 bool CodeGenModule::getExpressionLocationsEnabled() const {
5103 return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo;
5104 }
5105
getObjCFastEnumerationStateType()5106 QualType CodeGenModule::getObjCFastEnumerationStateType() {
5107 if (ObjCFastEnumerationStateType.isNull()) {
5108 RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
5109 D->startDefinition();
5110
5111 QualType FieldTypes[] = {
5112 Context.UnsignedLongTy,
5113 Context.getPointerType(Context.getObjCIdType()),
5114 Context.getPointerType(Context.UnsignedLongTy),
5115 Context.getConstantArrayType(Context.UnsignedLongTy,
5116 llvm::APInt(32, 5), nullptr, ArrayType::Normal, 0)
5117 };
5118
5119 for (size_t i = 0; i < 4; ++i) {
5120 FieldDecl *Field = FieldDecl::Create(Context,
5121 D,
5122 SourceLocation(),
5123 SourceLocation(), nullptr,
5124 FieldTypes[i], /*TInfo=*/nullptr,
5125 /*BitWidth=*/nullptr,
5126 /*Mutable=*/false,
5127 ICIS_NoInit);
5128 Field->setAccess(AS_public);
5129 D->addDecl(Field);
5130 }
5131
5132 D->completeDefinition();
5133 ObjCFastEnumerationStateType = Context.getTagDeclType(D);
5134 }
5135
5136 return ObjCFastEnumerationStateType;
5137 }
5138
5139 llvm::Constant *
GetConstantArrayFromStringLiteral(const StringLiteral * E)5140 CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
5141 assert(!E->getType()->isPointerType() && "Strings are always arrays");
5142
5143 // Don't emit it as the address of the string, emit the string data itself
5144 // as an inline array.
5145 if (E->getCharByteWidth() == 1) {
5146 SmallString<64> Str(E->getString());
5147
5148 // Resize the string to the right size, which is indicated by its type.
5149 const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
5150 Str.resize(CAT->getSize().getZExtValue());
5151 return llvm::ConstantDataArray::getString(VMContext, Str, false);
5152 }
5153
5154 auto *AType = cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
5155 llvm::Type *ElemTy = AType->getElementType();
5156 unsigned NumElements = AType->getNumElements();
5157
5158 // Wide strings have either 2-byte or 4-byte elements.
5159 if (ElemTy->getPrimitiveSizeInBits() == 16) {
5160 SmallVector<uint16_t, 32> Elements;
5161 Elements.reserve(NumElements);
5162
5163 for(unsigned i = 0, e = E->getLength(); i != e; ++i)
5164 Elements.push_back(E->getCodeUnit(i));
5165 Elements.resize(NumElements);
5166 return llvm::ConstantDataArray::get(VMContext, Elements);
5167 }
5168
5169 assert(ElemTy->getPrimitiveSizeInBits() == 32);
5170 SmallVector<uint32_t, 32> Elements;
5171 Elements.reserve(NumElements);
5172
5173 for(unsigned i = 0, e = E->getLength(); i != e; ++i)
5174 Elements.push_back(E->getCodeUnit(i));
5175 Elements.resize(NumElements);
5176 return llvm::ConstantDataArray::get(VMContext, Elements);
5177 }
5178
5179 static llvm::GlobalVariable *
GenerateStringLiteral(llvm::Constant * C,llvm::GlobalValue::LinkageTypes LT,CodeGenModule & CGM,StringRef GlobalName,CharUnits Alignment)5180 GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
5181 CodeGenModule &CGM, StringRef GlobalName,
5182 CharUnits Alignment) {
5183 unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
5184 CGM.getStringLiteralAddressSpace());
5185
5186 llvm::Module &M = CGM.getModule();
5187 // Create a global variable for this string
5188 auto *GV = new llvm::GlobalVariable(
5189 M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
5190 nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
5191 GV->setAlignment(Alignment.getAsAlign());
5192 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
5193 if (GV->isWeakForLinker()) {
5194 assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
5195 GV->setComdat(M.getOrInsertComdat(GV->getName()));
5196 }
5197 CGM.setDSOLocal(GV);
5198
5199 return GV;
5200 }
5201
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal. Identical literals are
/// shared (via ConstantStringMap) unless -fwritable-strings is in effect,
/// and are given a mangled linkonce_odr name when the C++ ABI merges
/// duplicate strings across TUs.
ConstantAddress
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
                                                  StringRef Name) {
  CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());

  llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
  llvm::GlobalVariable **Entry = nullptr;
  if (!LangOpts.WritableStrings) {
    Entry = &ConstantStringMap[C];
    // Cache hit: reuse the existing global, bumping its alignment if this
    // use requires more.
    if (auto GV = *Entry) {
      if (Alignment.getQuantity() > GV->getAlignment())
        GV->setAlignment(Alignment.getAsAlign());
      return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
                             Alignment);
    }
  }

  SmallString<256> MangledNameBuffer;
  StringRef GlobalVariableName;
  llvm::GlobalValue::LinkageTypes LT;

  // Mangle the string literal if that's how the ABI merges duplicate strings.
  // Don't do it if they are writable, since we don't want writes in one TU to
  // affect strings in another.
  if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) &&
      !LangOpts.WritableStrings) {
    llvm::raw_svector_ostream Out(MangledNameBuffer);
    getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
    LT = llvm::GlobalValue::LinkOnceODRLinkage;
    GlobalVariableName = MangledNameBuffer;
  } else {
    LT = llvm::GlobalValue::PrivateLinkage;
    GlobalVariableName = Name;
  }

  auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
  if (Entry)
    *Entry = GV;

  // Register with the sanitizer metadata so ASan can report on this global.
  SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
                                  QualType());

  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
                         Alignment);
}
5249
5250 /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
5251 /// array for the given ObjCEncodeExpr node.
5252 ConstantAddress
GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr * E)5253 CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
5254 std::string Str;
5255 getContext().getObjCEncodingForType(E->getEncodedType(), Str);
5256
5257 return GetAddrOfConstantCString(Str);
5258 }
5259
5260 /// GetAddrOfConstantCString - Returns a pointer to a character array containing
5261 /// the literal and a terminating '\0' character.
5262 /// The result has pointer to array type.
GetAddrOfConstantCString(const std::string & Str,const char * GlobalName)5263 ConstantAddress CodeGenModule::GetAddrOfConstantCString(
5264 const std::string &Str, const char *GlobalName) {
5265 StringRef StrWithNull(Str.c_str(), Str.size() + 1);
5266 CharUnits Alignment =
5267 getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
5268
5269 llvm::Constant *C =
5270 llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
5271
5272 // Don't share any string literals if strings aren't constant.
5273 llvm::GlobalVariable **Entry = nullptr;
5274 if (!LangOpts.WritableStrings) {
5275 Entry = &ConstantStringMap[C];
5276 if (auto GV = *Entry) {
5277 if (Alignment.getQuantity() > GV->getAlignment())
5278 GV->setAlignment(Alignment.getAsAlign());
5279 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5280 Alignment);
5281 }
5282 }
5283
5284 // Get the default prefix if a name wasn't specified.
5285 if (!GlobalName)
5286 GlobalName = ".str";
5287 // Create a global variable for this.
5288 auto GV = GenerateStringLiteral(C, llvm::GlobalValue::PrivateLinkage, *this,
5289 GlobalName, Alignment);
5290 if (Entry)
5291 *Entry = GV;
5292
5293 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5294 Alignment);
5295 }
5296
/// Return the address of a global variable backing a lifetime-extended
/// temporary (static or thread storage duration), creating and caching it on
/// first use. Uses a constant initializer when one can be computed, and
/// picks linkage/TLS/address-space to match the extending declaration.
ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
    const MaterializeTemporaryExpr *E, const Expr *Init) {
  assert((E->getStorageDuration() == SD_Static ||
          E->getStorageDuration() == SD_Thread) && "not a global temporary");
  const auto *VD = cast<VarDecl>(E->getExtendingDecl());

  // If we're not materializing a subobject of the temporary, keep the
  // cv-qualifiers from the type of the MaterializeTemporaryExpr.
  QualType MaterializedType = Init->getType();
  if (Init == E->getSubExpr())
    MaterializedType = E->getType();

  CharUnits Align = getContext().getTypeAlignInChars(MaterializedType);

  // Already materialized this temporary: reuse the cached global.
  if (llvm::Constant *Slot = MaterializedGlobalTemporaryMap[E])
    return ConstantAddress(Slot, Align);

  // FIXME: If an externally-visible declaration extends multiple temporaries,
  // we need to give each temporary the same name in every translation unit (and
  // we also need to make the temporaries externally-visible).
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getCXXABI().getMangleContext().mangleReferenceTemporary(
      VD, E->getManglingNumber(), Out);

  APValue *Value = nullptr;
  if (E->getStorageDuration() == SD_Static && VD && VD->evaluateValue()) {
    // If the initializer of the extending declaration is a constant
    // initializer, we should have a cached constant initializer for this
    // temporary. Note that this might have a different value from the value
    // computed by evaluating the initializer if the surrounding constant
    // expression modifies the temporary.
    Value = E->getOrCreateValue(false);
  }

  // Try evaluating it now, it might have a constant initializer.
  Expr::EvalResult EvalResult;
  if (!Value && Init->EvaluateAsRValue(EvalResult, getContext()) &&
      !EvalResult.hasSideEffects())
    Value = &EvalResult.Val;

  LangAS AddrSpace =
      VD ? GetGlobalVarAddressSpace(VD) : MaterializedType.getAddressSpace();

  Optional<ConstantEmitter> emitter;
  llvm::Constant *InitialValue = nullptr;
  bool Constant = false;
  llvm::Type *Type;
  if (Value) {
    // The temporary has a constant initializer, use it.
    emitter.emplace(*this);
    InitialValue = emitter->emitForInitializer(*Value, AddrSpace,
                                               MaterializedType);
    Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/Value);
    Type = InitialValue->getType();
  } else {
    // No initializer, the initialization will be provided when we
    // initialize the declaration which performed lifetime extension.
    Type = getTypes().ConvertTypeForMem(MaterializedType);
  }

  // Create a global variable for this lifetime-extended temporary.
  llvm::GlobalValue::LinkageTypes Linkage =
      getLLVMLinkageVarDefinition(VD, Constant);
  if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
    const VarDecl *InitVD;
    if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
        isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) {
      // Temporaries defined inside a class get linkonce_odr linkage because the
      // class can be defined in multiple translation units.
      Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
    } else {
      // There is no need for this temporary to have external linkage if the
      // VarDecl has external linkage.
      Linkage = llvm::GlobalVariable::InternalLinkage;
    }
  }
  auto TargetAS = getContext().getTargetAddressSpace(AddrSpace);
  auto *GV = new llvm::GlobalVariable(
      getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
      /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  if (emitter) emitter->finalize(GV);
  setGVProperties(GV, VD);
  GV->setAlignment(Align.getAsAlign());
  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
  if (VD->getTLSKind())
    setTLSMode(GV, *VD);
  // Cast to the generic address space if the variable lives elsewhere, so
  // callers always get a default-address-space pointer.
  llvm::Constant *CV = GV;
  if (AddrSpace != LangAS::Default)
    CV = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AddrSpace, LangAS::Default,
        Type->getPointerTo(
            getContext().getTargetAddressSpace(LangAS::Default)));
  MaterializedGlobalTemporaryMap[E] = CV;
  return ConstantAddress(CV, Align);
}
5394
5395 /// EmitObjCPropertyImplementations - Emit information for synthesized
5396 /// properties for an implementation.
EmitObjCPropertyImplementations(const ObjCImplementationDecl * D)5397 void CodeGenModule::EmitObjCPropertyImplementations(const
5398 ObjCImplementationDecl *D) {
5399 for (const auto *PID : D->property_impls()) {
5400 // Dynamic is just for type-checking.
5401 if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
5402 ObjCPropertyDecl *PD = PID->getPropertyDecl();
5403
5404 // Determine which methods need to be implemented, some may have
5405 // been overridden. Note that ::isPropertyAccessor is not the method
5406 // we want, that just indicates if the decl came from a
5407 // property. What we want to know is if the method is defined in
5408 // this implementation.
5409 auto *Getter = PID->getGetterMethodDecl();
5410 if (!Getter || Getter->isSynthesizedAccessorStub())
5411 CodeGenFunction(*this).GenerateObjCGetter(
5412 const_cast<ObjCImplementationDecl *>(D), PID);
5413 auto *Setter = PID->getSetterMethodDecl();
5414 if (!PD->isReadOnly() && (!Setter || Setter->isSynthesizedAccessorStub()))
5415 CodeGenFunction(*this).GenerateObjCSetter(
5416 const_cast<ObjCImplementationDecl *>(D), PID);
5417 }
5418 }
5419 }
5420
needsDestructMethod(ObjCImplementationDecl * impl)5421 static bool needsDestructMethod(ObjCImplementationDecl *impl) {
5422 const ObjCInterfaceDecl *iface = impl->getClassInterface();
5423 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
5424 ivar; ivar = ivar->getNextIvar())
5425 if (ivar->getType().isDestructedType())
5426 return true;
5427
5428 return false;
5429 }
5430
AllTrivialInitializers(CodeGenModule & CGM,ObjCImplementationDecl * D)5431 static bool AllTrivialInitializers(CodeGenModule &CGM,
5432 ObjCImplementationDecl *D) {
5433 CodeGenFunction CGF(CGM);
5434 for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
5435 E = D->init_end(); B != E; ++B) {
5436 CXXCtorInitializer *CtorInitExp = *B;
5437 Expr *Init = CtorInitExp->getInit();
5438 if (!CGF.isTrivialInitializer(Init))
5439 return false;
5440 }
5441 return true;
5442 }
5443
/// EmitObjCIvarInitializations - Emit information for ivar initialization
/// for an implementation. Synthesizes implicit .cxx_destruct and
/// .cxx_construct methods on the implementation when its ivars require
/// non-trivial destruction or initialization.
void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
  // We might need a .cxx_destruct even if we don't have any ivar initializers.
  if (needsDestructMethod(D)) {
    IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
    Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
    // Implicit instance method returning void.
    ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(
        getContext(), D->getLocation(), D->getLocation(), cxxSelector,
        getContext().VoidTy, nullptr, D,
        /*isInstance=*/true, /*isVariadic=*/false,
        /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
        /*isImplicitlyDeclared=*/true,
        /*isDefined=*/false, ObjCMethodDecl::Required);
    D->addInstanceMethod(DTORMethod);
    CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
    D->setHasDestructors(true);
  }

  // If the implementation doesn't have any ivar initializers, we don't need
  // a .cxx_construct.
  if (D->getNumIvarInitializers() == 0 ||
      AllTrivialInitializers(*this, D))
    return;

  IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
  Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
  // The constructor returns 'self'.
  ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(
      getContext(), D->getLocation(), D->getLocation(), cxxSelector,
      getContext().getObjCIdType(), nullptr, D, /*isInstance=*/true,
      /*isVariadic=*/false,
      /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
      /*isImplicitlyDeclared=*/true,
      /*isDefined=*/false, ObjCMethodDecl::Required);
  D->addInstanceMethod(CTORMethod);
  CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
  D->setHasNonZeroConstructors(true);
}
5483
5484 // EmitLinkageSpec - Emit all declarations in a linkage spec.
EmitLinkageSpec(const LinkageSpecDecl * LSD)5485 void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
5486 if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
5487 LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
5488 ErrorUnsupported(LSD, "linkage spec");
5489 return;
5490 }
5491
5492 EmitDeclContext(LSD);
5493 }
5494
EmitDeclContext(const DeclContext * DC)5495 void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
5496 for (auto *I : DC->decls()) {
5497 // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
5498 // are themselves considered "top-level", so EmitTopLevelDecl on an
5499 // ObjCImplDecl does not recursively visit them. We need to do that in
5500 // case they're nested inside another construct (LinkageSpecDecl /
5501 // ExportDecl) that does stop them from being considered "top-level".
5502 if (auto *OID = dyn_cast<ObjCImplDecl>(I)) {
5503 for (auto *M : OID->methods())
5504 EmitTopLevelDecl(M);
5505 }
5506
5507 EmitTopLevelDecl(I);
5508 }
5509 }
5510
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void CodeGenModule::EmitTopLevelDecl(Decl *D) {
  // Ignore dependent declarations.
  if (D->isTemplated())
    return;

  // Consteval function shouldn't be emitted.
  if (auto *FD = dyn_cast<FunctionDecl>(D))
    if (FD->isConsteval())
      return;

  // Dispatch on the declaration kind; each case either emits IR, records
  // module-level state (linker options, inline asm, metadata), or is an
  // explicit no-op for kinds that need no code generation.
  switch (D->getKind()) {
  case Decl::CXXConversion:
  case Decl::CXXMethod:
  case Decl::Function:
    EmitGlobal(cast<FunctionDecl>(D));
    // Always provide some coverage mapping
    // even for the functions that aren't emitted.
    AddDeferredUnusedCoverageMapping(D);
    break;

  case Decl::CXXDeductionGuide:
    // Function-like, but does not result in code emission.
    break;

  case Decl::Var:
  case Decl::Decomposition:
  case Decl::VarTemplateSpecialization:
    EmitGlobal(cast<VarDecl>(D));
    // Structured bindings may create hidden "holding" variables that need
    // to be emitted alongside the decomposition itself.
    if (auto *DD = dyn_cast<DecompositionDecl>(D))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitGlobal(HD);
    break;

  // Indirect fields from global anonymous structs and unions can be
  // ignored; only the actual variable requires IR gen support.
  case Decl::IndirectField:
    break;

  // C++ Decls
  case Decl::Namespace:
    EmitDeclContext(cast<NamespaceDecl>(D));
    break;
  case Decl::ClassTemplateSpecialization: {
    const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
    if (CGDebugInfo *DI = getModuleDebugInfo())
      if (Spec->getSpecializationKind() ==
          TSK_ExplicitInstantiationDefinition &&
          Spec->hasDefinition())
        DI->completeTemplateDefinition(*Spec);
  } LLVM_FALLTHROUGH;
  case Decl::CXXRecord: {
    CXXRecordDecl *CRD = cast<CXXRecordDecl>(D);
    if (CGDebugInfo *DI = getModuleDebugInfo()) {
      if (CRD->hasDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
      if (auto *ES = D->getASTContext().getExternalSource())
        if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
          DI->completeUnusedClass(*CRD);
    }
    // Emit any static data members, they may be definitions.
    for (auto *I : CRD->decls())
      if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I))
        EmitTopLevelDecl(I);
    break;
  }
  // No code generation needed.
  case Decl::UsingShadow:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::Concept:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::Block:
  case Decl::Empty:
  case Decl::Binding:
    break;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getModuleDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(*D));
    break;
  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getModuleDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
    break;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getModuleDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
    break;
  case Decl::CXXConstructor:
    getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
    break;
  case Decl::CXXDestructor:
    getCXXABI().EmitCXXDestructors(cast<CXXDestructorDecl>(D));
    break;

  case Decl::StaticAssert:
    // Nothing to do.
    break;

  // Objective-C Decls

  // Forward declarations, no (immediate) code generation.
  case Decl::ObjCInterface:
  case Decl::ObjCCategory:
    break;

  case Decl::ObjCProtocol: {
    auto *Proto = cast<ObjCProtocolDecl>(D);
    // Only definitions produce runtime protocol metadata; forward
    // declarations are skipped.
    if (Proto->isThisDeclarationADefinition())
      ObjCRuntime->GenerateProtocol(Proto);
    break;
  }

  case Decl::ObjCCategoryImpl:
    // Categories have properties but don't support synthesize so we
    // can ignore them here.
    ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
    break;

  case Decl::ObjCImplementation: {
    auto *OMD = cast<ObjCImplementationDecl>(D);
    EmitObjCPropertyImplementations(OMD);
    EmitObjCIvarInitializations(OMD);
    ObjCRuntime->GenerateClass(OMD);
    // Emit global variable debug information.
    if (CGDebugInfo *DI = getModuleDebugInfo())
      if (getCodeGenOpts().hasReducedDebugInfo())
        DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(
            OMD->getClassInterface()), OMD->getLocation());
    break;
  }
  case Decl::ObjCMethod: {
    auto *OMD = cast<ObjCMethodDecl>(D);
    // If this is not a prototype, emit the body.
    if (OMD->getBody())
      CodeGenFunction(*this).GenerateObjCMethod(OMD);
    break;
  }
  case Decl::ObjCCompatibleAlias:
    ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
    break;

  case Decl::PragmaComment: {
    const auto *PCD = cast<PragmaCommentDecl>(D);
    switch (PCD->getCommentKind()) {
    case PCK_Unknown:
      llvm_unreachable("unexpected pragma comment kind");
    case PCK_Linker:
      AppendLinkerOptions(PCD->getArg());
      break;
    case PCK_Lib:
      AddDependentLib(PCD->getArg());
      break;
    case PCK_Compiler:
    case PCK_ExeStr:
    case PCK_User:
      break; // We ignore all of these.
    }
    break;
  }

  case Decl::PragmaDetectMismatch: {
    const auto *PDMD = cast<PragmaDetectMismatchDecl>(D);
    AddDetectMismatch(PDMD->getName(), PDMD->getValue());
    break;
  }

  case Decl::LinkageSpec:
    EmitLinkageSpec(cast<LinkageSpecDecl>(D));
    break;

  case Decl::FileScopeAsm: {
    // File-scope asm is ignored during device-side CUDA compilation.
    if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
      break;
    // File-scope asm is ignored during device-side OpenMP compilation.
    if (LangOpts.OpenMPIsDevice)
      break;
    auto *AD = cast<FileScopeAsmDecl>(D);
    getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
    break;
  }

  case Decl::Import: {
    auto *Import = cast<ImportDecl>(D);

    // If we've already imported this module, we're done.
    if (!ImportedModules.insert(Import->getImportedModule()))
      break;

    // Emit debug information for direct imports.
    if (!Import->getImportedOwningModule()) {
      if (CGDebugInfo *DI = getModuleDebugInfo())
        DI->EmitImportDecl(*Import);
    }

    // Find all of the submodules and emit the module initializers.
    // This is an iterative DFS over the submodule tree; Visited guards the
    // worklist and EmittedModuleInitializers dedupes across imports.
    llvm::SmallPtrSet<clang::Module *, 16> Visited;
    SmallVector<clang::Module *, 16> Stack;
    Visited.insert(Import->getImportedModule());
    Stack.push_back(Import->getImportedModule());

    while (!Stack.empty()) {
      clang::Module *Mod = Stack.pop_back_val();
      if (!EmittedModuleInitializers.insert(Mod).second)
        continue;

      for (auto *D : Context.getModuleInitializers(Mod))
        EmitTopLevelDecl(D);

      // Visit the submodules of this module.
      for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
                                             SubEnd = Mod->submodule_end();
           Sub != SubEnd; ++Sub) {
        // Skip explicit children; they need to be explicitly imported to emit
        // the initializers.
        if ((*Sub)->IsExplicit)
          continue;

        if (Visited.insert(*Sub).second)
          Stack.push_back(*Sub);
      }
    }
    break;
  }

  case Decl::Export:
    EmitDeclContext(cast<ExportDecl>(D));
    break;

  case Decl::OMPThreadPrivate:
    EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
    break;

  case Decl::OMPAllocate:
    break;

  case Decl::OMPDeclareReduction:
    EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(D));
    break;

  case Decl::OMPDeclareMapper:
    EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(D));
    break;

  case Decl::OMPRequires:
    EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
    break;

  case Decl::Typedef:
  case Decl::TypeAlias: // using foo = bar; [C++11]
    if (CGDebugInfo *DI = getModuleDebugInfo())
      DI->EmitAndRetainType(
          getContext().getTypedefType(cast<TypedefNameDecl>(D)));
    break;

  case Decl::Record:
    if (CGDebugInfo *DI = getModuleDebugInfo())
      if (cast<RecordDecl>(D)->getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
    break;

  case Decl::Enum:
    if (CGDebugInfo *DI = getModuleDebugInfo())
      if (cast<EnumDecl>(D)->getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(D)));
    break;

  default:
    // Make sure we handled everything we should, every other kind is a
    // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
    // function. Need to recode Decl::Kind to do that easily.
    assert(isa<TypeDecl>(D) && "Unsupported decl kind");
    break;
  }
}
5790
AddDeferredUnusedCoverageMapping(Decl * D)5791 void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
5792 // Do we need to generate coverage mapping?
5793 if (!CodeGenOpts.CoverageMapping)
5794 return;
5795 switch (D->getKind()) {
5796 case Decl::CXXConversion:
5797 case Decl::CXXMethod:
5798 case Decl::Function:
5799 case Decl::ObjCMethod:
5800 case Decl::CXXConstructor:
5801 case Decl::CXXDestructor: {
5802 if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
5803 break;
5804 SourceManager &SM = getContext().getSourceManager();
5805 if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
5806 break;
5807 auto I = DeferredEmptyCoverageMappingDecls.find(D);
5808 if (I == DeferredEmptyCoverageMappingDecls.end())
5809 DeferredEmptyCoverageMappingDecls[D] = true;
5810 break;
5811 }
5812 default:
5813 break;
5814 };
5815 }
5816
ClearUnusedCoverageMapping(const Decl * D)5817 void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
5818 // Do we need to generate coverage mapping?
5819 if (!CodeGenOpts.CoverageMapping)
5820 return;
5821 if (const auto *Fn = dyn_cast<FunctionDecl>(D)) {
5822 if (Fn->isTemplateInstantiation())
5823 ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern());
5824 }
5825 auto I = DeferredEmptyCoverageMappingDecls.find(D);
5826 if (I == DeferredEmptyCoverageMappingDecls.end())
5827 DeferredEmptyCoverageMappingDecls[D] = false;
5828 else
5829 I->second = false;
5830 }
5831
EmitDeferredUnusedCoverageMappings()5832 void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
5833 // We call takeVector() here to avoid use-after-free.
5834 // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because
5835 // we deserialize function bodies to emit coverage info for them, and that
5836 // deserializes more declarations. How should we handle that case?
5837 for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) {
5838 if (!Entry.second)
5839 continue;
5840 const Decl *D = Entry.first;
5841 switch (D->getKind()) {
5842 case Decl::CXXConversion:
5843 case Decl::CXXMethod:
5844 case Decl::Function:
5845 case Decl::ObjCMethod: {
5846 CodeGenPGO PGO(*this);
5847 GlobalDecl GD(cast<FunctionDecl>(D));
5848 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
5849 getFunctionLinkage(GD));
5850 break;
5851 }
5852 case Decl::CXXConstructor: {
5853 CodeGenPGO PGO(*this);
5854 GlobalDecl GD(cast<CXXConstructorDecl>(D), Ctor_Base);
5855 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
5856 getFunctionLinkage(GD));
5857 break;
5858 }
5859 case Decl::CXXDestructor: {
5860 CodeGenPGO PGO(*this);
5861 GlobalDecl GD(cast<CXXDestructorDecl>(D), Dtor_Base);
5862 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
5863 getFunctionLinkage(GD));
5864 break;
5865 }
5866 default:
5867 break;
5868 };
5869 }
5870 }
5871
EmitMainVoidAlias()5872 void CodeGenModule::EmitMainVoidAlias() {
5873 // In order to transition away from "__original_main" gracefully, emit an
5874 // alias for "main" in the no-argument case so that libc can detect when
5875 // new-style no-argument main is in used.
5876 if (llvm::Function *F = getModule().getFunction("main")) {
5877 if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
5878 F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth()))
5879 addUsedGlobal(llvm::GlobalAlias::create("__main_void", F));
5880 }
5881 }
5882
5883 /// Turns the given pointer into a constant.
GetPointerConstant(llvm::LLVMContext & Context,const void * Ptr)5884 static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
5885 const void *Ptr) {
5886 uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
5887 llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
5888 return llvm::ConstantInt::get(i64, PtrInt);
5889 }
5890
EmitGlobalDeclMetadata(CodeGenModule & CGM,llvm::NamedMDNode * & GlobalMetadata,GlobalDecl D,llvm::GlobalValue * Addr)5891 static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
5892 llvm::NamedMDNode *&GlobalMetadata,
5893 GlobalDecl D,
5894 llvm::GlobalValue *Addr) {
5895 if (!GlobalMetadata)
5896 GlobalMetadata =
5897 CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
5898
5899 // TODO: should we report variant information for ctors/dtors?
5900 llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr),
5901 llvm::ConstantAsMetadata::get(GetPointerConstant(
5902 CGM.getLLVMContext(), D.getDecl()))};
5903 GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
5904 }
5905
5906 /// For each function which is declared within an extern "C" region and marked
5907 /// as 'used', but has internal linkage, create an alias from the unmangled
5908 /// name to the mangled name if possible. People expect to be able to refer
5909 /// to such functions with an unmangled name from inline assembly within the
5910 /// same translation unit.
EmitStaticExternCAliases()5911 void CodeGenModule::EmitStaticExternCAliases() {
5912 if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
5913 return;
5914 for (auto &I : StaticExternCValues) {
5915 IdentifierInfo *Name = I.first;
5916 llvm::GlobalValue *Val = I.second;
5917 if (Val && !getModule().getNamedValue(Name->getName()))
5918 addUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
5919 }
5920 }
5921
lookupRepresentativeDecl(StringRef MangledName,GlobalDecl & Result) const5922 bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
5923 GlobalDecl &Result) const {
5924 auto Res = Manglings.find(MangledName);
5925 if (Res == Manglings.end())
5926 return false;
5927 Result = Res->getValue();
5928 return true;
5929 }
5930
5931 /// Emits metadata nodes associating all the global values in the
5932 /// current module with the Decls they came from. This is useful for
5933 /// projects using IR gen as a subroutine.
5934 ///
5935 /// Since there's currently no way to associate an MDNode directly
5936 /// with an llvm::GlobalValue, we create a global named metadata
5937 /// with the name 'clang.global.decl.ptrs'.
EmitDeclMetadata()5938 void CodeGenModule::EmitDeclMetadata() {
5939 llvm::NamedMDNode *GlobalMetadata = nullptr;
5940
5941 for (auto &I : MangledDeclNames) {
5942 llvm::GlobalValue *Addr = getModule().getNamedValue(I.second);
5943 // Some mangled names don't necessarily have an associated GlobalValue
5944 // in this module, e.g. if we mangled it for DebugInfo.
5945 if (Addr)
5946 EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
5947 }
5948 }
5949
5950 /// Emits metadata nodes for all the local variables in the current
5951 /// function.
EmitDeclMetadata()5952 void CodeGenFunction::EmitDeclMetadata() {
5953 if (LocalDeclMap.empty()) return;
5954
5955 llvm::LLVMContext &Context = getLLVMContext();
5956
5957 // Find the unique metadata ID for this name.
5958 unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
5959
5960 llvm::NamedMDNode *GlobalMetadata = nullptr;
5961
5962 for (auto &I : LocalDeclMap) {
5963 const Decl *D = I.first;
5964 llvm::Value *Addr = I.second.getPointer();
5965 if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
5966 llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
5967 Alloca->setMetadata(
5968 DeclPtrKind, llvm::MDNode::get(
5969 Context, llvm::ValueAsMetadata::getConstant(DAddr)));
5970 } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
5971 GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
5972 EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
5973 }
5974 }
5975 }
5976
EmitVersionIdentMetadata()5977 void CodeGenModule::EmitVersionIdentMetadata() {
5978 llvm::NamedMDNode *IdentMetadata =
5979 TheModule.getOrInsertNamedMetadata("llvm.ident");
5980 std::string Version = getClangFullVersion();
5981 llvm::LLVMContext &Ctx = TheModule.getContext();
5982
5983 llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
5984 IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
5985 }
5986
EmitCommandLineMetadata()5987 void CodeGenModule::EmitCommandLineMetadata() {
5988 llvm::NamedMDNode *CommandLineMetadata =
5989 TheModule.getOrInsertNamedMetadata("llvm.commandline");
5990 std::string CommandLine = getCodeGenOpts().RecordCommandLine;
5991 llvm::LLVMContext &Ctx = TheModule.getContext();
5992
5993 llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Ctx, CommandLine)};
5994 CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
5995 }
5996
EmitCoverageFile()5997 void CodeGenModule::EmitCoverageFile() {
5998 if (getCodeGenOpts().CoverageDataFile.empty() &&
5999 getCodeGenOpts().CoverageNotesFile.empty())
6000 return;
6001
6002 llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
6003 if (!CUNode)
6004 return;
6005
6006 llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
6007 llvm::LLVMContext &Ctx = TheModule.getContext();
6008 auto *CoverageDataFile =
6009 llvm::MDString::get(Ctx, getCodeGenOpts().CoverageDataFile);
6010 auto *CoverageNotesFile =
6011 llvm::MDString::get(Ctx, getCodeGenOpts().CoverageNotesFile);
6012 for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
6013 llvm::MDNode *CU = CUNode->getOperand(i);
6014 llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
6015 GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
6016 }
6017 }
6018
GetAddrOfRTTIDescriptor(QualType Ty,bool ForEH)6019 llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
6020 bool ForEH) {
6021 // Return a bogus pointer if RTTI is disabled, unless it's for EH.
6022 // FIXME: should we even be calling this method if RTTI is disabled
6023 // and it's not for EH?
6024 if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice ||
6025 (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
6026 getTriple().isNVPTX()))
6027 return llvm::Constant::getNullValue(Int8PtrTy);
6028
6029 if (ForEH && Ty->isObjCObjectPointerType() &&
6030 LangOpts.ObjCRuntime.isGNUFamily())
6031 return ObjCRuntime->GetEHType(Ty);
6032
6033 return getCXXABI().getAddrOfRTTIDescriptor(Ty);
6034 }
6035
EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl * D)6036 void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
6037 // Do not emit threadprivates in simd-only mode.
6038 if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
6039 return;
6040 for (auto RefExpr : D->varlists()) {
6041 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
6042 bool PerformInit =
6043 VD->getAnyInitializer() &&
6044 !VD->getAnyInitializer()->isConstantInitializer(getContext(),
6045 /*ForRef=*/false);
6046
6047 Address Addr(GetAddrOfGlobalVar(VD), getContext().getDeclAlign(VD));
6048 if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
6049 VD, Addr, RefExpr->getBeginLoc(), PerformInit))
6050 CXXGlobalInits.push_back(InitFunction);
6051 }
6052 }
6053
6054 llvm::Metadata *
CreateMetadataIdentifierImpl(QualType T,MetadataTypeMap & Map,StringRef Suffix)6055 CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
6056 StringRef Suffix) {
6057 llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
6058 if (InternalId)
6059 return InternalId;
6060
6061 if (isExternallyVisible(T->getLinkage())) {
6062 std::string OutName;
6063 llvm::raw_string_ostream Out(OutName);
6064 getCXXABI().getMangleContext().mangleTypeName(T, Out);
6065 Out << Suffix;
6066
6067 InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
6068 } else {
6069 InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
6070 llvm::ArrayRef<llvm::Metadata *>());
6071 }
6072
6073 return InternalId;
6074 }
6075
/// Build (or fetch the cached) plain type-identifier metadata for \p T —
/// no suffix, backed by MetadataIdMap.
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
  return CreateMetadataIdentifierImpl(T, MetadataIdMap, "");
}
6079
/// Build (or fetch the cached) identifier metadata for \p T in its
/// virtual-member-function-pointer role; the ".virtual" suffix keeps these
/// ids distinct from the plain identifiers for the same type.
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
  return CreateMetadataIdentifierImpl(T, VirtualMetadataIdMap, ".virtual");
}
6084
6085 // Generalize pointer types to a void pointer with the qualifiers of the
6086 // originally pointed-to type, e.g. 'const char *' and 'char * const *'
6087 // generalize to 'const void *' while 'char *' and 'const char **' generalize to
6088 // 'void *'.
GeneralizeType(ASTContext & Ctx,QualType Ty)6089 static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) {
6090 if (!Ty->isPointerType())
6091 return Ty;
6092
6093 return Ctx.getPointerType(
6094 QualType(Ctx.VoidTy).withCVRQualifiers(
6095 Ty->getPointeeType().getCVRQualifiers()));
6096 }
6097
6098 // Apply type generalization to a FunctionType's return and argument types
GeneralizeFunctionType(ASTContext & Ctx,QualType Ty)6099 static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
6100 if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
6101 SmallVector<QualType, 8> GeneralizedParams;
6102 for (auto &Param : FnType->param_types())
6103 GeneralizedParams.push_back(GeneralizeType(Ctx, Param));
6104
6105 return Ctx.getFunctionType(
6106 GeneralizeType(Ctx, FnType->getReturnType()),
6107 GeneralizedParams, FnType->getExtProtoInfo());
6108 }
6109
6110 if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
6111 return Ctx.getFunctionNoProtoType(
6112 GeneralizeType(Ctx, FnType->getReturnType()));
6113
6114 llvm_unreachable("Encountered unknown FunctionType");
6115 }
6116
/// Build (or fetch the cached) generalized identifier for function type \p T:
/// pointer parameters/returns are collapsed via GeneralizeFunctionType, and
/// the ".generalized" suffix keeps these ids separate from exact-type ids.
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
  return CreateMetadataIdentifierImpl(GeneralizeFunctionType(getContext(), T),
                                      GeneralizedMetadataIdMap, ".generalized");
}
6121
6122 /// Returns whether this module needs the "all-vtables" type identifier.
NeedAllVtablesTypeId() const6123 bool CodeGenModule::NeedAllVtablesTypeId() const {
6124 // Returns true if at least one of vtable-based CFI checkers is enabled and
6125 // is not in the trapping mode.
6126 return ((LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
6127 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIVCall)) ||
6128 (LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
6129 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFINVCall)) ||
6130 (LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
6131 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIDerivedCast)) ||
6132 (LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast) &&
6133 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIUnrelatedCast)));
6134 }
6135
AddVTableTypeMetadata(llvm::GlobalVariable * VTable,CharUnits Offset,const CXXRecordDecl * RD)6136 void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
6137 CharUnits Offset,
6138 const CXXRecordDecl *RD) {
6139 llvm::Metadata *MD =
6140 CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
6141 VTable->addTypeMetadata(Offset.getQuantity(), MD);
6142
6143 if (CodeGenOpts.SanitizeCfiCrossDso)
6144 if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
6145 VTable->addTypeMetadata(Offset.getQuantity(),
6146 llvm::ConstantAsMetadata::get(CrossDsoTypeId));
6147
6148 if (NeedAllVtablesTypeId()) {
6149 llvm::Metadata *MD = llvm::MDString::get(getLLVMContext(), "all-vtables");
6150 VTable->addTypeMetadata(Offset.getQuantity(), MD);
6151 }
6152 }
6153
getSanStats()6154 llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
6155 if (!SanStats)
6156 SanStats = std::make_unique<llvm::SanitizerStatReport>(&getModule());
6157
6158 return *SanStats;
6159 }
6160 llvm::Value *
createOpenCLIntToSamplerConversion(const Expr * E,CodeGenFunction & CGF)6161 CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
6162 CodeGenFunction &CGF) {
6163 llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, E->getType());
6164 auto SamplerT = getOpenCLRuntime().getSamplerType(E->getType().getTypePtr());
6165 auto FTy = llvm::FunctionType::get(SamplerT, {C->getType()}, false);
6166 return CGF.Builder.CreateCall(CreateRuntimeFunction(FTy,
6167 "__translate_sampler_initializer"),
6168 {C});
6169 }
6170
getNaturalPointeeTypeAlignment(QualType T,LValueBaseInfo * BaseInfo,TBAAAccessInfo * TBAAInfo)6171 CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
6172 QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
6173 return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
6174 /* forPointeeType= */ true);
6175 }
6176
/// Compute the natural alignment of values of type \p T, optionally
/// reporting the alignment's provenance in \p BaseInfo and the TBAA access
/// descriptor in \p TBAAInfo. \p forPointeeType means T is being accessed
/// through a pointer, which changes how C++ class types are treated.
CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo,
                                                 bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = getTBAAAccessInfo(T);

  // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
  // that doesn't return the information we need to compute BaseInfo.

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  // Remember whether T was originally an array BEFORE stripping down to the
  // base element type; array pointees take the element branch below.
  bool AlignForArray = T->isArrayType();

  // Analyze the base element type, so we don't get confused by incomplete
  // array types.
  T = getContext().getBaseElementType(T);

  if (T->isIncompleteType()) {
    // We could try to replicate the logic from
    // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
    // type is incomplete, so it's impossible to test. We could try to reuse
    // getTypeAlignIfKnown, but that doesn't return the information we need
    // to set BaseInfo. So just ignore the possibility that the alignment is
    // greater than one.
    if (BaseInfo)
      *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
    return CharUnits::One();
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  const CXXRecordDecl *RD;
  if (T.getQualifiers().hasUnaligned()) {
    // 'unaligned'-qualified types are assumed to have alignment 1.
    Alignment = CharUnits::One();
  } else if (forPointeeType && !AlignForArray &&
             (RD = T->getAsCXXRecordDecl())) {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    Alignment = getClassPointerAlignment(RD);
  } else {
    Alignment = getContext().getTypeAlignInChars(T);
  }

  // Cap to the global maximum type alignment unless the alignment
  // was somehow explicit on the type.
  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
    if (Alignment.getQuantity() > MaxAlign &&
        !getContext().isAlignmentRequired(T))
      Alignment = CharUnits::fromQuantity(MaxAlign);
  }
  return Alignment;
}
6242
stopAutoInit()6243 bool CodeGenModule::stopAutoInit() {
6244 unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
6245 if (StopAfter) {
6246 // This number is positive only when -ftrivial-auto-var-init-stop-after=* is
6247 // used
6248 if (NumAutoVarInit >= StopAfter) {
6249 return true;
6250 }
6251 if (!NumAutoVarInit) {
6252 unsigned DiagID = getDiags().getCustomDiagID(
6253 DiagnosticsEngine::Warning,
6254 "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the "
6255 "number of times ftrivial-auto-var-init=%1 gets applied.");
6256 getDiags().Report(DiagID)
6257 << StopAfter
6258 << (getContext().getLangOpts().getTrivialAutoVarInit() ==
6259 LangOptions::TrivialAutoVarInitKind::Zero
6260 ? "zero"
6261 : "pattern");
6262 }
6263 ++NumAutoVarInit;
6264 }
6265 return false;
6266 }
6267