//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::TopLevelStmt:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
  case Decl::HLSLBuffer:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum: // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::MSGuid: // __declspec(uuid("..."))
  case Decl::UnnamedGlobalConstant:
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::ImplicitConceptSpecialization:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still need
  // to be emitted like static variables, e.g. a function-scope variable in
  // the constant address space in OpenCL.
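  // (For example, in OpenCL a block-scope '__constant int Tbl[2] = {0, 1};'
  // inside a kernel has no automatic storage duration and is emitted as a
  // module-level global.)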
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
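  // For example, a local 'static int x;' inside a C function 'foo' ends up
  // named 'foo.x' (and inside an ObjC method, '<selector>.x').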
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        llvm::PointerType::get(getLLVMContext(),
                               getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
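/// (For instance, a union initialized through a non-first member, such as
/// 'static union { int i; float f; } u = {.f = 1.0f};', can produce an
/// initializer whose IR type differs from the type GV was created with.)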
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (D.hasFlexibleArrayInit(getContext()))
      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

#ifndef NDEBUG
  CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
                      D.getFlexibleArrayInitChars(getContext());
  CharUnits CstSize = CharUnits::fromQuantity(
      CGM.getDataLayout().getTypeAllocSize(Init->getType()));
  assert(VarSize == CstSize && "Emitted constant has unexpected size");
#endif

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getValueType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(
        CGM.getModule(), Init->getType(), OldGV->isConstant(),
        OldGV->getLinkage(), Init, "",
        /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
        OldGV->getType()->getPointerAddressSpace());
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    OldGV->replaceAllUsesWith(GV);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  bool NeedsDtor =
      D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;

  GV->setConstant(
      D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (NeedsDtor && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  llvm::Type *elemTy = ConvertTypeForMem(D.getType());
  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobal(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

namespace {
struct DestroyObject final : EHScopeStack::Cleanup {
  DestroyObject(Address addr, QualType type,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

  Address addr;
  QualType type;
  CodeGenFunction::Destroyer *destroyer;
  bool useEHCleanupForArray;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Don't use an EH cleanup recursively from an EH cleanup.
    bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

    CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
  }
};

template <class Derived>
struct DestroyNRVOVariable : EHScopeStack::Cleanup {
  DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
      : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

  llvm::Value *NRVOFlag;
  Address Loc;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Along the exceptions path we always execute the dtor.
    bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

    llvm::BasicBlock *SkipDtorBB = nullptr;
    if (NRVO) {
      // If we exited via NRVO, we skip the destructor call.
      llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
      SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
      llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
      CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
      CGF.EmitBlock(RunDtorBB);
    }

    static_cast<Derived *>(this)->emitDestructorCall(CGF);

    if (NRVO) CGF.EmitBlock(SkipDtorBB);
  }

  virtual ~DestroyNRVOVariable() = default;
};

struct DestroyNRVOVariableCXX final
    : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
  DestroyNRVOVariableCXX(Address addr, QualType type,
                         const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
      : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
        Dtor(Dtor) {}

  const CXXDestructorDecl *Dtor;

  void emitDestructorCall(CodeGenFunction &CGF) {
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false, Loc, Ty);
  }
};

struct DestroyNRVOVariableC final
    : DestroyNRVOVariable<DestroyNRVOVariableC> {
  DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
      : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

  void emitDestructorCall(CodeGenFunction &CGF) {
    CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
  }
};

struct CallStackRestore final : EHScopeStack::Cleanup {
  Address Stack;
  CallStackRestore(Address Stack) : Stack(Stack) {}
  bool isRedundantBeforeReturn() override { return true; }
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *V = CGF.Builder.CreateLoad(Stack);
    CGF.Builder.CreateStackRestore(V);
  }
};

struct KmpcAllocFree final : EHScopeStack::Cleanup {
  std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
  KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
      : AddrSizePair(AddrSizePair) {}
  void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
    auto &RT = CGF.CGM.getOpenMPRuntime();
    RT.getKmpcFreeShared(CGF, AddrSizePair);
  }
};

struct ExtendGCLifetime final : EHScopeStack::Cleanup {
  const VarDecl &Var;
  ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Compute the address of the local variable, in case it's a
    // byref or something.
    DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                    Var.getType(), VK_LValue, SourceLocation());
    llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                              SourceLocation());
    CGF.EmitExtendGCLifetime(value);
  }
};

struct CallCleanupFunction final : EHScopeStack::Cleanup {
  llvm::Constant *CleanupFn;
  const CGFunctionInfo &FnInfo;
  const VarDecl &Var;

  CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                      const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                    Var.getType(), VK_LValue, SourceLocation());
    // Compute the address of the local variable, in case it's a byref
    // or something.
    llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //    void f(void* arg);
    //    __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = FnInfo.arg_begin()->type;
    llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

    CallArgList Args;
    Args.add(RValue::get(Arg),
             CGF.getContext().getPointerType(Var.getType()));
    auto Callee = CGCallee::forDirect(CleanupFn);
    CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
  }
};
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

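/// Returns whether \p var is referenced anywhere within the statement \p s,
/// either directly through a DeclRefExpr or indirectly via a block capture.
/// Used below to decide whether an ObjC lifetime-qualified local must be
/// zero-initialized before its own initializer runs.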
static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null; as in missing decl or conditional of an if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

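/// Try to emit a __weak variable's initialization as a direct call to
/// objc_copyWeak (for l-value sources) or objc_moveWeak (for x-value sources)
/// when the initializer is a load from another __weak variable, possibly
/// wrapped in representation-preserving casts.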
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress(CGF);
      if (needsCast) {
        srcAddr =
            srcAddr.withElementType(destLV.getAddress(CGF).getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability();
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // The left-hand side is annotated nonnull, so check that the right-hand
  // side of the assignment is in fact nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty =
        cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(*this), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means we omit the retain, which causes non-autoreleased return values
    // to be released immediately.
    [[fallthrough]];
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(*this), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
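/// For example, 'int a[100] = {1};' is mostly zero: a single bzero of the
/// whole array followed by one scalar store of 1 covers it.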
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder,
                                        bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder, IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder, IsAutoInit);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
  // do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided
/// not to use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
/// memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
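/// For instance, storing 'struct { int a, b; } s = {1, 2};' as two i32 stores
/// lets later passes forward s.a and s.b individually instead of having to
/// look through one wide aggregate store.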
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
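/// For example, for 'struct { char c; int i; }' the three padding bytes after
/// 'c' are materialized as an explicit [3 x i8] element in the returned
/// constant so they can be filled deterministically.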
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    uint64_t Size = ArrayTy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = ArrayTy->getElementType();
    bool ZeroInitializer = constant->isNullValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
    return llvm::ConstantArray::get(NewArrayTy, Values);
  }
  // FIXME: Add handling for tail padding in vectors. Vectors don't
  // have padding between or inside elements, but the total amount of
  // data can be less than the allocated size.
  return constant;
}

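/// Create (or reuse) a private constant global holding \p Constant so that a
/// local variable's initializer can later be copied from it with a memcpy.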
Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return std::string(getMangledName(FD));
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, CacheEntry->getValueType(), Align);
}

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  return SrcPtr.withElementType(CGM.Int8Ty);
}

static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant, bool IsAutoInit) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    auto *I = Builder.CreateStore(constant, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
                                   SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Loc.withElementType(Ty);
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
                                  IsAutoInit);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    auto *I = Builder.CreateMemSet(
        Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  // If the initializer is small or trivialAutoVarInit is set, use a handful of
  // stores.
  bool IsTrivialAutoVarInitPattern =
      CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
      LangOptions::TrivialAutoVarInitKind::Pattern;
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      if (STy == Loc.getElementType() ||
          (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
        const llvm::StructLayout *Layout =
            CGM.getDataLayout().getStructLayout(STy);
        for (unsigned i = 0; i != constant->getNumOperands(); i++) {
          CharUnits CurOff =
              CharUnits::fromQuantity(Layout->getElementOffset(i));
          Address EltPtr = Builder.CreateConstInBoundsByteGEP(
              Loc.withElementType(CGM.Int8Ty), CurOff);
          emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      if (ATy == Loc.getElementType() ||
          (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
        for (unsigned i = 0; i != ATy->getNumElements(); i++) {
          Address EltPtr = Builder.CreateConstGEP(
              Loc.withElementType(ATy->getElementType()), i);
          emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    }
  }

  // Copy from a global.
  auto *I =
      Builder.CreateMemCpy(Loc,
                           createUnnamedGlobalForMemcpyFrom(
                               CGM, D, Builder, constant, Loc.getAlignment()),
                           SizeVal, isVolatile);
  if (IsAutoInit)
    I->addAnnotationMetadata("auto-init");
}

static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                     Address Loc, bool isVolatile,
                                     CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
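/// Emission proceeds in three phases: materializing the storage
/// (EmitAutoVarAlloca), emitting the initializer (EmitAutoVarInit), and
/// registering any cleanups (EmitAutoVarCleanups).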
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
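/// The marker passes the object's size in bytes (or -1 when the size is
/// scalable) together with the alloca address, e.g.
/// 'call void @llvm.lifetime.start.p0(i64 4, ptr %x)'.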
EmitLifetimeStart(llvm::TypeSize Size,llvm::Value * Addr)1355 llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
1356 llvm::Value *Addr) {
1357 if (!ShouldEmitLifetimeMarkers)
1358 return nullptr;
1359
1360 assert(Addr->getType()->getPointerAddressSpace() ==
1361 CGM.getDataLayout().getAllocaAddrSpace() &&
1362 "Pointer should be in alloca address space");
1363 llvm::Value *SizeV = llvm::ConstantInt::get(
1364 Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
1365 llvm::CallInst *C =
1366 Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
1367 C->setDoesNotThrow();
1368 return SizeV;
1369 }
1370
EmitLifetimeEnd(llvm::Value * Size,llvm::Value * Addr)1371 void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
1372 assert(Addr->getType()->getPointerAddressSpace() ==
1373 CGM.getDataLayout().getAllocaAddrSpace() &&
1374 "Pointer should be in alloca address space");
1375 llvm::CallInst *C =
1376 Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
1377 C->setDoesNotThrow();
1378 }
1379
EmitAndRegisterVariableArrayDimensions(CGDebugInfo * DI,const VarDecl & D,bool EmitDebugInfo)1380 void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1381 CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1382 // For each dimension stores its QualType and corresponding
1383 // size-expression Value.
1384 SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1385 SmallVector<IdentifierInfo *, 4> VLAExprNames;
1386
1387 // Break down the array into individual dimensions.
1388 QualType Type1D = D.getType();
1389 while (getContext().getAsVariableArrayType(Type1D)) {
1390 auto VlaSize = getVLAElements1D(Type1D);
1391 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1392 Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1393 else {
1394 // Generate a locally unique name for the size expression.
1395 Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1396 SmallString<12> Buffer;
1397 StringRef NameRef = Name.toStringRef(Buffer);
1398 auto &Ident = getContext().Idents.getOwn(NameRef);
1399 VLAExprNames.push_back(&Ident);
1400 auto SizeExprAddr =
1401 CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1402 Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1403 Dimensions.emplace_back(SizeExprAddr.getPointer(),
1404 Type1D.getUnqualifiedType());
1405 }
1406 Type1D = VlaSize.Type;
1407 }
1408
1409 if (!EmitDebugInfo)
1410 return;
1411
1412 // Register each dimension's size-expression with a DILocalVariable,
1413 // so that it can be used by CGDebugInfo when instantiating a DISubrange
1414 // to describe this array.
1415 unsigned NameIdx = 0;
1416 for (auto &VlaSize : Dimensions) {
1417 llvm::Metadata *MD;
1418 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1419 MD = llvm::ConstantAsMetadata::get(C);
1420 else {
1421 // Create an artificial VarDecl to generate debug info for.
1422 IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1423 auto QT = getContext().getIntTypeForBitwidth(
1424 SizeTy->getScalarSizeInBits(), false);
1425 auto *ArtificialDecl = VarDecl::Create(
1426 getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1427 D.getLocation(), D.getLocation(), NameIdent, QT,
1428 getContext().CreateTypeSourceInfo(QT), SC_Auto);
1429 ArtificialDecl->setImplicit();
1430
1431 MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1432 Builder);
1433 }
1434 assert(MD && "No Size expression debug node created");
1435 DI->registerVLASizeExpression(VlaSize.Type, MD);
1436 }
1437 }
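// Rough example of the scheme above: for
//   void f(int n, int m) { int a[n][m]; }
// each non-constant dimension gets a temporary named "__vla_expr0",
// "__vla_expr1", ... holding its size, plus an artificial implicit VarDecl,
// so CGDebugInfo can reference those values from the DISubrange counts that
// describe the array type.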
1438
1439 /// EmitAutoVarAlloca - Emit the alloca and debug information for a
1440 /// local variable. Does not emit initialization or destruction.
1441 CodeGenFunction::AutoVarEmission
1442 CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1443 QualType Ty = D.getType();
1444 assert(
1445 Ty.getAddressSpace() == LangAS::Default ||
1446 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1447
1448 AutoVarEmission emission(D);
1449
1450 bool isEscapingByRef = D.isEscapingByref();
1451 emission.IsEscapingByRef = isEscapingByRef;
1452
1453 CharUnits alignment = getContext().getDeclAlign(&D);
1454
1455 // If the type is variably-modified, emit all the VLA sizes for it.
1456 if (Ty->isVariablyModifiedType())
1457 EmitVariablyModifiedType(Ty);
1458
1459 auto *DI = getDebugInfo();
1460 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1461
1462 Address address = Address::invalid();
1463 Address AllocaAddr = Address::invalid();
1464 Address OpenMPLocalAddr = Address::invalid();
1465 if (CGM.getLangOpts().OpenMPIRBuilder)
1466 OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
1467 else
1468 OpenMPLocalAddr =
1469 getLangOpts().OpenMP
1470 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1471 : Address::invalid();
1472
1473 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1474
1475 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1476 address = OpenMPLocalAddr;
1477 AllocaAddr = OpenMPLocalAddr;
1478 } else if (Ty->isConstantSizeType()) {
1479 // If this value is an array or struct with a statically determinable
1480 // constant initializer, there are optimizations we can do.
1481 //
1482 // TODO: We should constant-evaluate the initializer of any variable,
1483 // as long as it is initialized by a constant expression. Currently,
1484 // isConstantInitializer produces wrong answers for structs with
1485 // reference or bitfield members, and a few other cases, and checking
1486 // for POD-ness protects us from some of these.
1487 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1488 (D.isConstexpr() ||
1489 ((Ty.isPODType(getContext()) ||
1490 getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1491 D.getInit()->isConstantInitializer(getContext(), false)))) {
1492
1493       // If the variable is a const type, is neither an NRVO
1494       // candidate nor a __block variable, and has no mutable members,
1495       // emit it as a global instead.
1496       // The exception is a variable located in a non-constant address
1497       // space in OpenCL.
1498 bool NeedsDtor =
1499 D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
1500 if ((!getLangOpts().OpenCL ||
1501 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1502 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1503 !isEscapingByRef &&
1504 Ty.isConstantStorage(getContext(), true, !NeedsDtor))) {
1505 EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1506
1507 // Signal this condition to later callbacks.
1508 emission.Addr = Address::invalid();
1509 assert(emission.wasEmittedAsGlobal());
1510 return emission;
1511 }
1512
1513 // Otherwise, tell the initialization code that we're in this case.
1514 emission.IsConstantAggregate = true;
1515 }
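      // Illustrative sketch of the two fast paths above: for something like
      //   const int Tbl[4] = {1, 2, 3, 4};
      // the variable may be emitted as an internal global (when
      // -fmerge-all-constants and the other conditions hold); otherwise
      // IsConstantAggregate lets EmitAutoVarInit lower the initializer with a
      // constant store/memcpy instead of element-by-element stores.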
1516
1517 // A normal fixed sized variable becomes an alloca in the entry block,
1518 // unless:
1519 // - it's an NRVO variable.
1520 // - we are compiling OpenMP and it's an OpenMP local variable.
1521 if (NRVO) {
1522 // The named return value optimization: allocate this variable in the
1523 // return slot, so that we can elide the copy when returning this
1524 // variable (C++0x [class.copy]p34).
1525 address = ReturnValue;
1526 AllocaAddr = ReturnValue;
1527
1528 if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1529 const auto *RD = RecordTy->getDecl();
1530 const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1531 if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1532 RD->isNonTrivialToPrimitiveDestroy()) {
1533 // Create a flag that is used to indicate when the NRVO was applied
1534 // to this variable. Set it to zero to indicate that NRVO was not
1535 // applied.
1536 llvm::Value *Zero = Builder.getFalse();
1537 Address NRVOFlag =
1538 CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1539 EnsureInsertPoint();
1540 Builder.CreateStore(Zero, NRVOFlag);
1541
1542 // Record the NRVO flag for this variable.
1543 NRVOFlags[&D] = NRVOFlag.getPointer();
1544 emission.NRVOFlag = NRVOFlag.getPointer();
1545 }
1546 }
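      // Illustrative note: the flag above starts out false and is expected to
      // be set to true at return statements where the NRVO actually fires, so
      // the destructor cleanup (DestroyNRVOVariableCXX below) conceptually
      // becomes "if (!nrvo) destroy(obj)" and is skipped when the object was
      // constructed directly in the return slot.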
1547 } else {
1548 CharUnits allocaAlignment;
1549 llvm::Type *allocaTy;
1550 if (isEscapingByRef) {
1551 auto &byrefInfo = getBlockByrefInfo(&D);
1552 allocaTy = byrefInfo.Type;
1553 allocaAlignment = byrefInfo.ByrefAlignment;
1554 } else {
1555 allocaTy = ConvertTypeForMem(Ty);
1556 allocaAlignment = alignment;
1557 }
1558
1559 // Create the alloca. Note that we set the name separately from
1560 // building the instruction so that it's there even in no-asserts
1561 // builds.
1562 address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1563 /*ArraySize=*/nullptr, &AllocaAddr);
1564
1565 // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1566 // the catch parameter starts in the catchpad instruction, and we can't
1567 // insert code in those basic blocks.
1568 bool IsMSCatchParam =
1569 D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1570
1571 // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1572 // if we don't have a valid insertion point (?).
1573 if (HaveInsertPoint() && !IsMSCatchParam) {
1574 // If there's a jump into the lifetime of this variable, its lifetime
1575 // gets broken up into several regions in IR, which requires more work
1576 // to handle correctly. For now, just omit the intrinsics; this is a
1577 // rare case, and it's better to just be conservatively correct.
1578 // PR28267.
1579 //
1580 // We have to do this in all language modes if there's a jump past the
1581 // declaration. We also have to do it in C if there's a jump to an
1582 // earlier point in the current block because non-VLA lifetimes begin as
1583 // soon as the containing block is entered, not when its variables
1584 // actually come into scope; suppressing the lifetime annotations
1585 // completely in this case is unnecessarily pessimistic, but again, this
1586 // is rare.
1587 if (!Bypasses.IsBypassed(&D) &&
1588 !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1589 llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
1590 emission.SizeForLifetimeMarkers =
1591 EmitLifetimeStart(Size, AllocaAddr.getPointer());
1592 }
1593 } else {
1594 assert(!emission.useLifetimeMarkers());
1595 }
1596 }
1597 } else {
1598 EnsureInsertPoint();
1599
1600 // Delayed globalization for variable length declarations. This ensures that
1601 // the expression representing the length has been emitted and can be used
1602 // by the definition of the VLA. Since this is an escaped declaration, in
1603 // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
1604 // deallocation call to __kmpc_free_shared() is emitted later.
1605 bool VarAllocated = false;
1606 if (getLangOpts().OpenMPIsTargetDevice) {
1607 auto &RT = CGM.getOpenMPRuntime();
1608 if (RT.isDelayedVariableLengthDecl(*this, &D)) {
1609 // Emit call to __kmpc_alloc_shared() instead of the alloca.
1610 std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
1611 RT.getKmpcAllocShared(*this, &D);
1612
1613 // Save the address of the allocation:
1614 LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
1615 CGM.getContext().getDeclAlign(&D),
1616 AlignmentSource::Decl);
1617 address = Base.getAddress(*this);
1618
1619 // Push a cleanup block to emit the call to __kmpc_free_shared in the
1620 // appropriate location at the end of the scope of the
1621 // __kmpc_alloc_shared functions:
1622 pushKmpcAllocFree(NormalCleanup, AddrSizePair);
1623
1624 // Mark variable as allocated:
1625 VarAllocated = true;
1626 }
1627 }
1628
1629 if (!VarAllocated) {
1630 if (!DidCallStackSave) {
1631 // Save the stack.
1632 Address Stack =
1633 CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack");
1634
1635 llvm::Value *V = Builder.CreateStackSave();
1636 assert(V->getType() == AllocaInt8PtrTy);
1637 Builder.CreateStore(V, Stack);
1638
1639 DidCallStackSave = true;
1640
1641 // Push a cleanup block and restore the stack there.
1642 // FIXME: in general circumstances, this should be an EH cleanup.
1643 pushStackRestore(NormalCleanup, Stack);
1644 }
1645
1646 auto VlaSize = getVLASize(Ty);
1647 llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1648
1649 // Allocate memory for the array.
1650 address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1651 &AllocaAddr);
1652 }
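    // Illustrative sketch of the non-OpenMP path above: a VLA such as
    //   int vla[n];
    // lowers to roughly
    //   %saved = call ptr @llvm.stacksave()       ; once per scope
    //   %vla = alloca i32, i64 %n
    //   ...
    //   call void @llvm.stackrestore(ptr %saved)  ; via the pushed cleanup
    // whereas delayed declarations on OpenMP target devices use the
    // __kmpc_alloc_shared / __kmpc_free_shared pair described above instead.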
1653
1654 // If we have debug info enabled, properly describe the VLA dimensions for
1655 // this type by registering the vla size expression for each of the
1656 // dimensions.
1657 EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1658 }
1659
1660 setAddrOfLocalVar(&D, address);
1661 emission.Addr = address;
1662 emission.AllocaAddr = AllocaAddr;
1663
1664 // Emit debug info for local var declaration.
1665 if (EmitDebugInfo && HaveInsertPoint()) {
1666 Address DebugAddr = address;
1667 bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1668 DI->setLocation(D.getLocation());
1669
1670 // If NRVO, use a pointer to the return address.
1671 if (UsePointerValue) {
1672 DebugAddr = ReturnValuePointer;
1673 AllocaAddr = ReturnValuePointer;
1674 }
1675 (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
1676 UsePointerValue);
1677 }
1678
1679 if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1680 EmitVarAnnotations(&D, address.getPointer());
1681
1682 // Make sure we call @llvm.lifetime.end.
1683 if (emission.useLifetimeMarkers())
1684 EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
1685 emission.getOriginalAllocatedAddress(),
1686 emission.getSizeForLifetimeMarkers());
1687
1688 return emission;
1689 }
1690
1691 static bool isCapturedBy(const VarDecl &, const Expr *);
1692
1693 /// Determines whether the given __block variable is potentially
1694 /// captured by the given statement.
1695 static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1696 if (const Expr *E = dyn_cast<Expr>(S))
1697 return isCapturedBy(Var, E);
1698 for (const Stmt *SubStmt : S->children())
1699 if (isCapturedBy(Var, SubStmt))
1700 return true;
1701 return false;
1702 }
1703
1704 /// Determines whether the given __block variable is potentially
1705 /// captured by the given expression.
1706 static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1707 // Skip the most common kinds of expressions that make
1708 // hierarchy-walking expensive.
1709 E = E->IgnoreParenCasts();
1710
1711 if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1712 const BlockDecl *Block = BE->getBlockDecl();
1713 for (const auto &I : Block->captures()) {
1714 if (I.getVariable() == &Var)
1715 return true;
1716 }
1717
1718 // No need to walk into the subexpressions.
1719 return false;
1720 }
1721
1722 if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1723 const CompoundStmt *CS = SE->getSubStmt();
1724 for (const auto *BI : CS->body())
1725 if (const auto *BIE = dyn_cast<Expr>(BI)) {
1726 if (isCapturedBy(Var, BIE))
1727 return true;
1728 }
1729 else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1730 // special case declarations
1731 for (const auto *I : DS->decls()) {
1732 if (const auto *VD = dyn_cast<VarDecl>((I))) {
1733 const Expr *Init = VD->getInit();
1734 if (Init && isCapturedBy(Var, Init))
1735 return true;
1736 }
1737 }
1738 }
1739 else
1740       // FIXME: Conservatively assume that arbitrary statements cause capturing.
1741       // Later, add code to look into statements for capture analysis.
1742 return true;
1743 return false;
1744 }
1745
1746 for (const Stmt *SubStmt : E->children())
1747 if (isCapturedBy(Var, SubStmt))
1748 return true;
1749
1750 return false;
1751 }
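// Illustration of the case these helpers detect: a __block variable whose own
// initializer captures it, e.g.
//   __block void (^b)(void) = ^{ b(); };
// The block literal on the right-hand side captures 'b' and may move it to
// the heap, so EmitAutoVarInit emits the initializer first and only then
// stores the result through the (possibly forwarded) variable address.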
1752
1753 /// Determine whether the given initializer is trivial in the sense
1754 /// that it requires no code to be generated.
1755 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1756 if (!Init)
1757 return true;
1758
1759 if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1760 if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1761 if (Constructor->isTrivial() &&
1762 Constructor->isDefaultConstructor() &&
1763 !Construct->requiresZeroInitialization())
1764 return true;
1765
1766 return false;
1767 }
1768
1769 void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1770 const VarDecl &D,
1771 Address Loc) {
1772 auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1773 auto trivialAutoVarInitMaxSize =
1774 getContext().getLangOpts().TrivialAutoVarInitMaxSize;
1775 CharUnits Size = getContext().getTypeSizeInChars(type);
1776 bool isVolatile = type.isVolatileQualified();
1777 if (!Size.isZero()) {
1778     // We skip auto-init for variables based on their alloc size. For example,
1779     // given "struct Foo {int x; char buff[1024];}" and a max-size flag of 1023,
1780     // all variables of type Foo will be skipped. Ideally, we would only skip the
1781     // buff array and still auto-init x in this example.
1782     // TODO: Improve the size filtering to work per member.
1783 auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
1784 switch (trivialAutoVarInit) {
1785 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1786 llvm_unreachable("Uninitialized handled by caller");
1787 case LangOptions::TrivialAutoVarInitKind::Zero:
1788 if (CGM.stopAutoInit())
1789 return;
1790 if (trivialAutoVarInitMaxSize > 0 &&
1791 allocSize > trivialAutoVarInitMaxSize)
1792 return;
1793 emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1794 break;
1795 case LangOptions::TrivialAutoVarInitKind::Pattern:
1796 if (CGM.stopAutoInit())
1797 return;
1798 if (trivialAutoVarInitMaxSize > 0 &&
1799 allocSize > trivialAutoVarInitMaxSize)
1800 return;
1801 emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1802 break;
1803 }
1804 return;
1805 }
1806
1807 // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1808 // them, so emit a memcpy with the VLA size to initialize each element.
1809 // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1810 // will catch that code, but there exists code which generates zero-sized
1811 // VLAs. Be nice and initialize whatever they requested.
1812 const auto *VlaType = getContext().getAsVariableArrayType(type);
1813 if (!VlaType)
1814 return;
1815 auto VlaSize = getVLASize(VlaType);
1816 auto SizeVal = VlaSize.NumElts;
1817 CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1818 switch (trivialAutoVarInit) {
1819 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1820 llvm_unreachable("Uninitialized handled by caller");
1821
1822 case LangOptions::TrivialAutoVarInitKind::Zero: {
1823 if (CGM.stopAutoInit())
1824 return;
1825 if (!EltSize.isOne())
1826 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1827 auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
1828 SizeVal, isVolatile);
1829 I->addAnnotationMetadata("auto-init");
1830 break;
1831 }
1832
1833 case LangOptions::TrivialAutoVarInitKind::Pattern: {
1834 if (CGM.stopAutoInit())
1835 return;
1836 llvm::Type *ElTy = Loc.getElementType();
1837 llvm::Constant *Constant = constWithPadding(
1838 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1839 CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1840 llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1841 llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1842 llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1843 llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1844 SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1845 "vla.iszerosized");
1846 Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1847 EmitBlock(SetupBB);
1848 if (!EltSize.isOne())
1849 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1850 llvm::Value *BaseSizeInChars =
1851 llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1852 Address Begin = Loc.withElementType(Int8Ty);
1853 llvm::Value *End = Builder.CreateInBoundsGEP(
1854 Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
1855 llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1856 EmitBlock(LoopBB);
1857 llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1858 Cur->addIncoming(Begin.getPointer(), OriginBB);
1859 CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1860 auto *I =
1861 Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
1862 createUnnamedGlobalForMemcpyFrom(
1863 CGM, D, Builder, Constant, ConstantAlign),
1864 BaseSizeInChars, isVolatile);
1865 I->addAnnotationMetadata("auto-init");
1866 llvm::Value *Next =
1867 Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1868 llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1869 Builder.CreateCondBr(Done, ContBB, LoopBB);
1870 Cur->addIncoming(Next, LoopBB);
1871 EmitBlock(ContBB);
1872 } break;
1873 }
1874 }
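// Rough effect of the function above, for illustration: under
// -ftrivial-auto-var-init=zero an uninitialized local such as
//   int buf[16];
// receives zero stores or a memset annotated with "auto-init"; under
// =pattern it is filled with a repeating byte pattern instead. VLAs get a
// memset or a per-element memcpy loop, and -ftrivial-auto-var-init-max-size
// can exempt large fixed-size locals from either form.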
1875
1876 void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1877 assert(emission.Variable && "emission was not valid!");
1878
1879 // If this was emitted as a global constant, we're done.
1880 if (emission.wasEmittedAsGlobal()) return;
1881
1882 const VarDecl &D = *emission.Variable;
1883 auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
1884 QualType type = D.getType();
1885
1886 // If this local has an initializer, emit it now.
1887 const Expr *Init = D.getInit();
1888
1889 // If we are at an unreachable point, we don't need to emit the initializer
1890 // unless it contains a label.
1891 if (!HaveInsertPoint()) {
1892 if (!Init || !ContainsLabel(Init)) return;
1893 EnsureInsertPoint();
1894 }
1895
1896 // Initialize the structure of a __block variable.
1897 if (emission.IsEscapingByRef)
1898 emitByrefStructureInit(emission);
1899
1900   // Initialize the variable here if it doesn't have an initializer and it is a
1901 // C struct that is non-trivial to initialize or an array containing such a
1902 // struct.
1903 if (!Init &&
1904 type.isNonTrivialToPrimitiveDefaultInitialize() ==
1905 QualType::PDIK_Struct) {
1906 LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1907 if (emission.IsEscapingByRef)
1908 drillIntoBlockVariable(*this, Dst, &D);
1909 defaultInitNonTrivialCStructVar(Dst);
1910 return;
1911 }
1912
1913 // Check whether this is a byref variable that's potentially
1914 // captured and moved by its own initializer. If so, we'll need to
1915 // emit the initializer first, then copy into the variable.
1916 bool capturedByInit =
1917 Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1918
1919 bool locIsByrefHeader = !capturedByInit;
1920 const Address Loc =
1921 locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1922
1923 // Note: constexpr already initializes everything correctly.
1924 LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1925 (D.isConstexpr()
1926 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1927 : (D.getAttr<UninitializedAttr>()
1928 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1929 : getContext().getLangOpts().getTrivialAutoVarInit()));
1930
1931 auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1932 if (trivialAutoVarInit ==
1933 LangOptions::TrivialAutoVarInitKind::Uninitialized)
1934 return;
1935
1936 // Only initialize a __block's storage: we always initialize the header.
1937 if (emission.IsEscapingByRef && !locIsByrefHeader)
1938 Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
1939
1940 return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1941 };
1942
1943 if (isTrivialInitializer(Init))
1944 return initializeWhatIsTechnicallyUninitialized(Loc);
1945
1946 llvm::Constant *constant = nullptr;
1947 if (emission.IsConstantAggregate ||
1948 D.mightBeUsableInConstantExpressions(getContext())) {
1949 assert(!capturedByInit && "constant init contains a capturing block?");
1950 constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1951 if (constant && !constant->isZeroValue() &&
1952 (trivialAutoVarInit !=
1953 LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1954 IsPattern isPattern =
1955 (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1956 ? IsPattern::Yes
1957 : IsPattern::No;
1958 // C guarantees that brace-init with fewer initializers than members in
1959 // the aggregate will initialize the rest of the aggregate as-if it were
1960 // static initialization. In turn static initialization guarantees that
1961 // padding is initialized to zero bits. We could instead pattern-init if D
1962 // has any ImplicitValueInitExpr, but that seems to be unintuitive
1963 // behavior.
1964 constant = constWithPadding(CGM, IsPattern::No,
1965 replaceUndef(CGM, isPattern, constant));
1966 }
1967 }
1968
1969 if (!constant) {
1970 initializeWhatIsTechnicallyUninitialized(Loc);
1971 LValue lv = MakeAddrLValue(Loc, type);
1972 lv.setNonGC(true);
1973 return EmitExprAsInit(Init, &D, lv, capturedByInit);
1974 }
1975
1976 if (!emission.IsConstantAggregate) {
1977 // For simple scalar/complex initialization, store the value directly.
1978 LValue lv = MakeAddrLValue(Loc, type);
1979 lv.setNonGC(true);
1980 return EmitStoreThroughLValue(RValue::get(constant), lv, true);
1981 }
1982
1983 emitStoresForConstant(CGM, D, Loc.withElementType(CGM.Int8Ty),
1984 type.isVolatileQualified(), Builder, constant,
1985 /*IsAutoInit=*/false);
1986 }
1987
1988 /// Emit an expression as an initializer for an object (variable, field, etc.)
1989 /// at the given location. The expression is not necessarily the normal
1990 /// initializer for the object, and the address is not necessarily
1991 /// its normal location.
1992 ///
1993 /// \param init the initializing expression
1994 /// \param D the object to act as if we're initializing
1995 /// \param lvalue the lvalue to initialize
1996 /// \param capturedByInit true if \p D is a __block variable
1997 /// whose address is potentially changed by the initializer
1998 void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
1999 LValue lvalue, bool capturedByInit) {
2000 QualType type = D->getType();
2001
2002 if (type->isReferenceType()) {
2003 RValue rvalue = EmitReferenceBindingToExpr(init);
2004 if (capturedByInit)
2005 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2006 EmitStoreThroughLValue(rvalue, lvalue, true);
2007 return;
2008 }
2009 switch (getEvaluationKind(type)) {
2010 case TEK_Scalar:
2011 EmitScalarInit(init, D, lvalue, capturedByInit);
2012 return;
2013 case TEK_Complex: {
2014 ComplexPairTy complex = EmitComplexExpr(init);
2015 if (capturedByInit)
2016 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2017 EmitStoreOfComplex(complex, lvalue, /*init*/ true);
2018 return;
2019 }
2020 case TEK_Aggregate:
2021 if (type->isAtomicType()) {
2022 EmitAtomicInit(const_cast<Expr*>(init), lvalue);
2023 } else {
2024 AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
2025 if (isa<VarDecl>(D))
2026 Overlap = AggValueSlot::DoesNotOverlap;
2027 else if (auto *FD = dyn_cast<FieldDecl>(D))
2028 Overlap = getOverlapForFieldInit(FD);
2029 // TODO: how can we delay here if D is captured by its initializer?
2030 EmitAggExpr(init, AggValueSlot::forLValue(
2031 lvalue, *this, AggValueSlot::IsDestructed,
2032 AggValueSlot::DoesNotNeedGCBarriers,
2033 AggValueSlot::IsNotAliased, Overlap));
2034 }
2035 return;
2036 }
2037 llvm_unreachable("bad evaluation kind");
2038 }
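// Illustrative dispatch of the function above: "int x = e;" takes the
// TEK_Scalar path (a plain store), "_Complex double z = e;" the TEK_Complex
// path (EmitStoreOfComplex), and "struct S s = make();" the TEK_Aggregate
// path, which evaluates the initializer directly into the variable's slot
// via EmitAggExpr (or EmitAtomicInit for atomic types).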
2039
2040 /// Enter a destroy cleanup for the given local variable.
2041 void CodeGenFunction::emitAutoVarTypeCleanup(
2042 const CodeGenFunction::AutoVarEmission &emission,
2043 QualType::DestructionKind dtorKind) {
2044 assert(dtorKind != QualType::DK_none);
2045
2046 // Note that for __block variables, we want to destroy the
2047 // original stack object, not the possibly forwarded object.
2048 Address addr = emission.getObjectAddress(*this);
2049
2050 const VarDecl *var = emission.Variable;
2051 QualType type = var->getType();
2052
2053 CleanupKind cleanupKind = NormalAndEHCleanup;
2054 CodeGenFunction::Destroyer *destroyer = nullptr;
2055
2056 switch (dtorKind) {
2057 case QualType::DK_none:
2058 llvm_unreachable("no cleanup for trivially-destructible variable");
2059
2060 case QualType::DK_cxx_destructor:
2061 // If there's an NRVO flag on the emission, we need a different
2062 // cleanup.
2063 if (emission.NRVOFlag) {
2064 assert(!type->isArrayType());
2065 CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
2066 EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
2067 emission.NRVOFlag);
2068 return;
2069 }
2070 break;
2071
2072 case QualType::DK_objc_strong_lifetime:
2073 // Suppress cleanups for pseudo-strong variables.
2074 if (var->isARCPseudoStrong()) return;
2075
2076 // Otherwise, consider whether to use an EH cleanup or not.
2077 cleanupKind = getARCCleanupKind();
2078
2079 // Use the imprecise destroyer by default.
2080 if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
2081 destroyer = CodeGenFunction::destroyARCStrongImprecise;
2082 break;
2083
2084 case QualType::DK_objc_weak_lifetime:
2085 break;
2086
2087 case QualType::DK_nontrivial_c_struct:
2088 destroyer = CodeGenFunction::destroyNonTrivialCStruct;
2089 if (emission.NRVOFlag) {
2090 assert(!type->isArrayType());
2091 EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
2092 emission.NRVOFlag, type);
2093 return;
2094 }
2095 break;
2096 }
2097
2098 // If we haven't chosen a more specific destroyer, use the default.
2099 if (!destroyer) destroyer = getDestroyer(dtorKind);
2100
2101 // Use an EH cleanup in array destructors iff the destructor itself
2102 // is being pushed as an EH cleanup.
2103 bool useEHCleanup = (cleanupKind & EHCleanup);
2104 EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
2105 useEHCleanup);
2106 }
2107
2108 void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
2109 assert(emission.Variable && "emission was not valid!");
2110
2111 // If this was emitted as a global constant, we're done.
2112 if (emission.wasEmittedAsGlobal()) return;
2113
2114 // If we don't have an insertion point, we're done. Sema prevents
2115 // us from jumping into any of these scopes anyway.
2116 if (!HaveInsertPoint()) return;
2117
2118 const VarDecl &D = *emission.Variable;
2119
2120 // Check the type for a cleanup.
2121 if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
2122 emitAutoVarTypeCleanup(emission, dtorKind);
2123
2124 // In GC mode, honor objc_precise_lifetime.
2125 if (getLangOpts().getGC() != LangOptions::NonGC &&
2126 D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2127 EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
2128 }
2129
2130 // Handle the cleanup attribute.
2131 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2132 const FunctionDecl *FD = CA->getFunctionDecl();
2133
2134 llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2135 assert(F && "Could not find function!");
2136
2137 const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2138 EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
2139 }
2140
2141 // If this is a block variable, call _Block_object_destroy
2142 // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2143 // mode.
2144 if (emission.IsEscapingByRef &&
2145 CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2146 BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2147 if (emission.Variable->getType().isObjCGCWeak())
2148 Flags |= BLOCK_FIELD_IS_WEAK;
2149 enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2150 /*LoadBlockVarAddr*/ false,
2151 cxxDestructorCanThrow(emission.Variable->getType()));
2152 }
2153 }
2154
2155 CodeGenFunction::Destroyer *
2156 CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2157 switch (kind) {
2158 case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2159 case QualType::DK_cxx_destructor:
2160 return destroyCXXObject;
2161 case QualType::DK_objc_strong_lifetime:
2162 return destroyARCStrongPrecise;
2163 case QualType::DK_objc_weak_lifetime:
2164 return destroyARCWeak;
2165 case QualType::DK_nontrivial_c_struct:
2166 return destroyNonTrivialCStruct;
2167 }
2168 llvm_unreachable("Unknown DestructionKind");
2169 }
2170
2171 /// pushEHDestroy - Push the standard destructor for the given type as
2172 /// an EH-only cleanup.
2173 void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2174 Address addr, QualType type) {
2175 assert(dtorKind && "cannot push destructor for trivial type");
2176 assert(needsEHCleanup(dtorKind));
2177
2178 pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2179 }
2180
2181 /// pushDestroy - Push the standard destructor for the given type as
2182 /// at least a normal cleanup.
2183 void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2184 Address addr, QualType type) {
2185 assert(dtorKind && "cannot push destructor for trivial type");
2186
2187 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2188 pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2189 cleanupKind & EHCleanup);
2190 }
2191
2192 void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2193 QualType type, Destroyer *destroyer,
2194 bool useEHCleanupForArray) {
2195 pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
2196 destroyer, useEHCleanupForArray);
2197 }
2198
2199 void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2200 EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2201 }
2202
2203 void CodeGenFunction::pushKmpcAllocFree(
2204 CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
2205 EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
2206 }
2207
2208 void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
2209 Address addr, QualType type,
2210 Destroyer *destroyer,
2211 bool useEHCleanupForArray) {
2212 // If we're not in a conditional branch, we don't need to bother generating a
2213 // conditional cleanup.
2214 if (!isInConditionalBranch()) {
2215 // Push an EH-only cleanup for the object now.
2216 // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2217 // around in case a temporary's destructor throws an exception.
2218 if (cleanupKind & EHCleanup)
2219 EHStack.pushCleanup<DestroyObject>(
2220 static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
2221 destroyer, useEHCleanupForArray);
2222
2223 return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
2224 cleanupKind, Address::invalid(), addr, type, destroyer, useEHCleanupForArray);
2225 }
2226
2227 // Otherwise, we should only destroy the object if it's been initialized.
2228 // Re-use the active flag and saved address across both the EH and end of
2229 // scope cleanups.
2230
2231 using SavedType = typename DominatingValue<Address>::saved_type;
2232 using ConditionalCleanupType =
2233 EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
2234 Destroyer *, bool>;
2235
2236 Address ActiveFlag = createCleanupActiveFlag();
2237 SavedType SavedAddr = saveValueInCond(addr);
2238
2239 if (cleanupKind & EHCleanup) {
2240 EHStack.pushCleanup<ConditionalCleanupType>(
2241 static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr, type,
2242 destroyer, useEHCleanupForArray);
2243 initFullExprCleanupWithFlag(ActiveFlag);
2244 }
2245
2246 pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
2247 cleanupKind, ActiveFlag, SavedAddr, type, destroyer,
2248 useEHCleanupForArray);
2249 }
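// Sketch of when the conditional path above matters, for illustration: a
// lifetime-extended temporary created on only one branch, e.g.
//   const Foo &r = cond ? Foo(1) : Foo(2);
// must be destroyed at end of scope only if it was actually constructed, so
// the cleanup is guarded by the active flag that the conditional-evaluation
// machinery sets when the corresponding branch runs.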
2250
2251 /// emitDestroy - Immediately perform the destruction of the given
2252 /// object.
2253 ///
2254 /// \param addr - the address of the object; a type*
2255 /// \param type - the type of the object; if an array type, all
2256 /// objects are destroyed in reverse order
2257 /// \param destroyer - the function to call to destroy individual
2258 /// elements
2259 /// \param useEHCleanupForArray - whether an EH cleanup should be
2260 /// used when destroying array elements, in case one of the
2261 /// destructions throws an exception
2262 void CodeGenFunction::emitDestroy(Address addr, QualType type,
2263 Destroyer *destroyer,
2264 bool useEHCleanupForArray) {
2265 const ArrayType *arrayType = getContext().getAsArrayType(type);
2266 if (!arrayType)
2267 return destroyer(*this, addr, type);
2268
2269 llvm::Value *length = emitArrayLength(arrayType, type, addr);
2270
2271 CharUnits elementAlign =
2272 addr.getAlignment()
2273 .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2274
2275 // Normally we have to check whether the array is zero-length.
2276 bool checkZeroLength = true;
2277
2278 // But if the array length is constant, we can suppress that.
2279 if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2280 // ...and if it's constant zero, we can just skip the entire thing.
2281 if (constLength->isZero()) return;
2282 checkZeroLength = false;
2283 }
2284
2285 llvm::Value *begin = addr.getPointer();
2286 llvm::Value *end =
2287 Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
2288 emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2289 checkZeroLength, useEHCleanupForArray);
2290 }
2291
2292 /// emitArrayDestroy - Destroys all the elements of the given array,
2293 /// beginning from last to first. The array cannot be zero-length.
2294 ///
2295 /// \param begin - a type* denoting the first element of the array
2296 /// \param end - a type* denoting one past the end of the array
2297 /// \param elementType - the element type of the array
2298 /// \param destroyer - the function to call to destroy elements
2299 /// \param useEHCleanup - whether to push an EH cleanup to destroy
2300 /// the remaining elements in case the destruction of a single
2301 /// element throws
2302 void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2303 llvm::Value *end,
2304 QualType elementType,
2305 CharUnits elementAlign,
2306 Destroyer *destroyer,
2307 bool checkZeroLength,
2308 bool useEHCleanup) {
2309 assert(!elementType->isArrayType());
2310
2311 // The basic structure here is a do-while loop, because we don't
2312 // need to check for the zero-element case.
2313 llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2314 llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2315
2316 if (checkZeroLength) {
2317 llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2318 "arraydestroy.isempty");
2319 Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2320 }
2321
2322 // Enter the loop body, making that address the current address.
2323 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2324 EmitBlock(bodyBB);
2325 llvm::PHINode *elementPast =
2326 Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2327 elementPast->addIncoming(end, entryBB);
2328
2329 // Shift the address back by one element.
2330 llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2331 llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
2332 llvm::Value *element = Builder.CreateInBoundsGEP(
2333 llvmElementType, elementPast, negativeOne, "arraydestroy.element");
2334
2335 if (useEHCleanup)
2336 pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2337 destroyer);
2338
2339 // Perform the actual destruction there.
2340 destroyer(*this, Address(element, llvmElementType, elementAlign),
2341 elementType);
2342
2343 if (useEHCleanup)
2344 PopCleanupBlock();
2345
2346 // Check whether we've reached the end.
2347 llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2348 Builder.CreateCondBr(done, doneBB, bodyBB);
2349 elementPast->addIncoming(element, Builder.GetInsertBlock());
2350
2351 // Done.
2352 EmitBlock(doneBB);
2353 }
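// Shape of the loop emitted above (illustrative; block and value names match
// the ones created here), e.g. for destroying "Foo a[8]":
//   arraydestroy.body:
//     %elementPast = phi ptr [ %end, ... ], [ %element, %arraydestroy.body ]
//     %element = getelementptr inbounds %Foo, ptr %elementPast, i64 -1
//     ; the destroyer emits the per-element destruction here
//     %done = icmp eq ptr %element, %begin
//     br i1 %done, label %arraydestroy.done, label %arraydestroy.body
// i.e. elements are destroyed from the last down to the first.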
2354
2355 /// Perform partial array destruction as if in an EH cleanup. Unlike
2356 /// emitArrayDestroy, the element type here may still be an array type.
2357 static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2358 llvm::Value *begin, llvm::Value *end,
2359 QualType type, CharUnits elementAlign,
2360 CodeGenFunction::Destroyer *destroyer) {
2361 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2362
2363 // If the element type is itself an array, drill down.
2364 unsigned arrayDepth = 0;
2365 while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2366 // VLAs don't require a GEP index to walk into.
2367 if (!isa<VariableArrayType>(arrayType))
2368 arrayDepth++;
2369 type = arrayType->getElementType();
2370 }
2371
2372 if (arrayDepth) {
2373 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2374
2375 SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2376 begin = CGF.Builder.CreateInBoundsGEP(
2377 elemTy, begin, gepIndices, "pad.arraybegin");
2378 end = CGF.Builder.CreateInBoundsGEP(
2379 elemTy, end, gepIndices, "pad.arrayend");
2380 }
2381
2382 // Destroy the array. We don't ever need an EH cleanup because we
2383 // assume that we're in an EH cleanup ourselves, so a throwing
2384 // destructor causes an immediate terminate.
2385 CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2386 /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2387 }
2388
2389 namespace {
2390 /// RegularPartialArrayDestroy - a cleanup which performs a partial
2391 /// array destroy where the end pointer is regularly determined and
2392 /// does not need to be loaded from a local.
2393 class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2394 llvm::Value *ArrayBegin;
2395 llvm::Value *ArrayEnd;
2396 QualType ElementType;
2397 CodeGenFunction::Destroyer *Destroyer;
2398 CharUnits ElementAlign;
2399 public:
2400   RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2401 QualType elementType, CharUnits elementAlign,
2402 CodeGenFunction::Destroyer *destroyer)
2403 : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2404 ElementType(elementType), Destroyer(destroyer),
2405 ElementAlign(elementAlign) {}
2406
2407   void Emit(CodeGenFunction &CGF, Flags flags) override {
2408 emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2409 ElementType, ElementAlign, Destroyer);
2410 }
2411 };
2412
2413 /// IrregularPartialArrayDestroy - a cleanup which performs a
2414 /// partial array destroy where the end pointer is irregularly
2415 /// determined and must be loaded from a local.
2416 class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2417 llvm::Value *ArrayBegin;
2418 Address ArrayEndPointer;
2419 QualType ElementType;
2420 CodeGenFunction::Destroyer *Destroyer;
2421 CharUnits ElementAlign;
2422 public:
2423   IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2424 Address arrayEndPointer,
2425 QualType elementType,
2426 CharUnits elementAlign,
2427 CodeGenFunction::Destroyer *destroyer)
2428 : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2429 ElementType(elementType), Destroyer(destroyer),
2430 ElementAlign(elementAlign) {}
2431
2432   void Emit(CodeGenFunction &CGF, Flags flags) override {
2433 llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2434 emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2435 ElementType, ElementAlign, Destroyer);
2436 }
2437 };
2438 } // end anonymous namespace
2439
2440 /// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
2441 /// already-constructed elements of the given array. The cleanup
2442 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2443 ///
2444 /// \param elementType - the immediate element type of the array;
2445 /// possibly still an array type
2446 void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2447 Address arrayEndPointer,
2448 QualType elementType,
2449 CharUnits elementAlign,
2450 Destroyer *destroyer) {
2451 pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
2452 arrayBegin, arrayEndPointer,
2453 elementType, elementAlign,
2454 destroyer);
2455 }
2456
2457 /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2458 /// already-constructed elements of the given array. The cleanup
2459 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2460 ///
2461 /// \param elementType - the immediate element type of the array;
2462 /// possibly still an array type
2463 void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2464 llvm::Value *arrayEnd,
2465 QualType elementType,
2466 CharUnits elementAlign,
2467 Destroyer *destroyer) {
2468 pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2469 arrayBegin, arrayEnd,
2470 elementType, elementAlign,
2471 destroyer);
2472 }
2473
2474 /// Lazily declare the @llvm.lifetime.start intrinsic.
2475 llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2476 if (LifetimeStartFn)
2477 return LifetimeStartFn;
2478 LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
2479 llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2480 return LifetimeStartFn;
2481 }
2482
2483 /// Lazily declare the @llvm.lifetime.end intrinsic.
2484 llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2485 if (LifetimeEndFn)
2486 return LifetimeEndFn;
2487 LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
2488 llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2489 return LifetimeEndFn;
2490 }
2491
2492 namespace {
2493 /// A cleanup to perform a release of an object at the end of a
2494 /// function. This is used to balance out the incoming +1 of a
2495 /// ns_consumed argument when we can't reasonably do that just by
2496 /// not doing the initial retain for a __block argument.
2497 struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2498   ConsumeARCParameter(llvm::Value *param,
2499 ARCPreciseLifetime_t precise)
2500 : Param(param), Precise(precise) {}
2501
2502 llvm::Value *Param;
2503 ARCPreciseLifetime_t Precise;
2504
2505   void Emit(CodeGenFunction &CGF, Flags flags) override {
2506 CGF.EmitARCRelease(Param, Precise);
2507 }
2508 };
2509 } // end anonymous namespace
2510
2511 /// Emit an alloca (or GlobalValue depending on target)
2512 /// for the specified parameter and set up LocalDeclMap.
2513 void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2514 unsigned ArgNo) {
2515 bool NoDebugInfo = false;
2516 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2517 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2518 "Invalid argument to EmitParmDecl");
2519
2520 // Set the name of the parameter's initial value to make IR easier to
2521 // read. Don't modify the names of globals.
2522 if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
2523 Arg.getAnyValue()->setName(D.getName());
2524
2525 QualType Ty = D.getType();
2526
2527 // Use better IR generation for certain implicit parameters.
2528 if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
2529 // The only implicit argument a block has is its literal.
2530 // This may be passed as an inalloca'ed value on Windows x86.
2531 if (BlockInfo) {
2532 llvm::Value *V = Arg.isIndirect()
2533 ? Builder.CreateLoad(Arg.getIndirectAddress())
2534 : Arg.getDirectValue();
2535 setBlockContextParameter(IPD, ArgNo, V);
2536 return;
2537 }
2538     // Suppress debug info for ThreadPrivateVar parameters; otherwise it hides
2539     // the debug info of TLS variables.
2540 NoDebugInfo =
2541 (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
2542 }
2543
2544 Address DeclPtr = Address::invalid();
2545 Address AllocaPtr = Address::invalid();
2546 bool DoStore = false;
2547 bool IsScalar = hasScalarEvaluationKind(Ty);
2548 bool UseIndirectDebugAddress = false;
2549
2550 // If we already have a pointer to the argument, reuse the input pointer.
2551 if (Arg.isIndirect()) {
2552 DeclPtr = Arg.getIndirectAddress();
2553 DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
2554 // Indirect argument is in alloca address space, which may be different
2555 // from the default address space.
2556 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2557 auto *V = DeclPtr.getPointer();
2558 AllocaPtr = DeclPtr;
2559
2560 // For truly ABI indirect arguments -- those that are not `byval` -- store
2561 // the address of the argument on the stack to preserve debug information.
2562 ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
2563 if (ArgInfo.isIndirect())
2564 UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
2565 if (UseIndirectDebugAddress) {
2566 auto PtrTy = getContext().getPointerType(Ty);
2567 AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
2568 D.getName() + ".indirect_addr");
2569 EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
2570 }
2571
2572 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2573 auto DestLangAS =
2574 getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2575 if (SrcLangAS != DestLangAS) {
2576 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2577 CGM.getDataLayout().getAllocaAddrSpace());
2578 auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
2579 auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
2580 DeclPtr =
2581 DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
2582 *this, V, SrcLangAS, DestLangAS, T, true),
2583 DeclPtr.isKnownNonNull());
2584 }
2585
2586 // Push a destructor cleanup for this parameter if the ABI requires it.
2587 // Don't push a cleanup in a thunk for a method that will also emit a
2588 // cleanup.
2589 if (Ty->isRecordType() && !CurFuncIsThunk &&
2590 Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2591 if (QualType::DestructionKind DtorKind =
2592 D.needsDestruction(getContext())) {
2593 assert((DtorKind == QualType::DK_cxx_destructor ||
2594 DtorKind == QualType::DK_nontrivial_c_struct) &&
2595 "unexpected destructor type");
2596 pushDestroy(DtorKind, DeclPtr, Ty);
2597 CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
2598 EHStack.stable_begin();
2599 }
2600 }
2601 } else {
2602 // Check if the parameter address is controlled by OpenMP runtime.
2603 Address OpenMPLocalAddr =
2604 getLangOpts().OpenMP
2605 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
2606 : Address::invalid();
2607 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2608 DeclPtr = OpenMPLocalAddr;
2609 AllocaPtr = DeclPtr;
2610 } else {
2611 // Otherwise, create a temporary to hold the value.
2612 DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2613 D.getName() + ".addr", &AllocaPtr);
2614 }
2615 DoStore = true;
2616 }
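  // Illustrative result of the non-indirect path above: a parameter such as
  //   void f(int x);
  // typically gets a stack slot named "x.addr", roughly
  //   %x.addr = alloca i32
  //   store i32 %x, ptr %x.addr
  // and later uses of the parameter (and its debug info) go through that
  // slot, unless OpenMP supplies its own local address for it.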
2617
2618 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2619
2620 LValue lv = MakeAddrLValue(DeclPtr, Ty);
2621 if (IsScalar) {
2622 Qualifiers qs = Ty.getQualifiers();
2623 if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2624 // We honor __attribute__((ns_consumed)) for types with lifetime.
2625 // For __strong, it's handled by just skipping the initial retain;
2626 // otherwise we have to balance out the initial +1 with an extra
2627 // cleanup to do the release at the end of the function.
2628 bool isConsumed = D.hasAttr<NSConsumedAttr>();
2629
2630 // If a parameter is pseudo-strong then we can omit the implicit retain.
2631 if (D.isARCPseudoStrong()) {
2632 assert(lt == Qualifiers::OCL_Strong &&
2633 "pseudo-strong variable isn't strong?");
2634 assert(qs.hasConst() && "pseudo-strong variable should be const!");
2635 lt = Qualifiers::OCL_ExplicitNone;
2636 }
2637
2638 // Load objects passed indirectly.
2639 if (Arg.isIndirect() && !ArgVal)
2640 ArgVal = Builder.CreateLoad(DeclPtr);
2641
2642 if (lt == Qualifiers::OCL_Strong) {
2643 if (!isConsumed) {
2644 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2645 // use objc_storeStrong(&dest, value) for retaining the
2646 // object. But first, store a null into 'dest' because
2647 // objc_storeStrong attempts to release its old value.
2648 llvm::Value *Null = CGM.EmitNullConstant(D.getType());
2649 EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
2650 EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
2651 DoStore = false;
2652 }
2653 else
2654 // Don't use objc_retainBlock for block pointers, because we
2655 // don't want to Block_copy something just because we got it
2656 // as a parameter.
2657 ArgVal = EmitARCRetainNonBlock(ArgVal);
2658 }
2659 } else {
2660 // Push the cleanup for a consumed parameter.
2661 if (isConsumed) {
2662 ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2663 ? ARCPreciseLifetime : ARCImpreciseLifetime);
2664 EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2665 precise);
2666 }
2667
2668 if (lt == Qualifiers::OCL_Weak) {
2669 EmitARCInitWeak(DeclPtr, ArgVal);
2670 DoStore = false; // The weak init is a store, no need to do two.
2671 }
2672 }
2673
2674 // Enter the cleanup scope.
2675 EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
2676 }
2677 }
2678
2679 // Store the initial value into the alloca.
2680 if (DoStore)
2681 EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
2682
2683 setAddrOfLocalVar(&D, DeclPtr);
2684
2685 // Emit debug info for param declarations in non-thunk functions.
2686 if (CGDebugInfo *DI = getDebugInfo()) {
2687 if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
2688 !NoDebugInfo) {
2689 llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
2690 &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
2691 if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
2692 DI->getParamDbgMappings().insert({Var, DILocalVar});
2693 }
2694 }
2695
2696 if (D.hasAttr<AnnotateAttr>())
2697 EmitVarAnnotations(&D, DeclPtr.getPointer());
2698
2699 // We can only check return value nullability if all arguments to the
2700 // function satisfy their nullability preconditions. This makes it necessary
2701 // to emit null checks for args in the function body itself.
2702 if (requiresReturnValueNullabilityCheck()) {
2703 auto Nullability = Ty->getNullability();
2704 if (Nullability && *Nullability == NullabilityKind::NonNull) {
2705 SanitizerScope SanScope(this);
2706 RetValNullabilityPrecondition =
2707 Builder.CreateAnd(RetValNullabilityPrecondition,
2708 Builder.CreateIsNotNull(Arg.getAnyValue()));
2709 }
2710 }
2711 }
2712
2713 void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2714 CodeGenFunction *CGF) {
2715 if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2716 return;
2717 getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2718 }
2719
2720 void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
2721 CodeGenFunction *CGF) {
2722 if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2723 (!LangOpts.EmitAllDecls && !D->isUsed()))
2724 return;
2725 getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
2726 }
2727
2728 void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
2729 getOpenMPRuntime().processRequiresDirective(D);
2730 }
2731
2732 void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
2733 for (const Expr *E : D->varlists()) {
2734 const auto *DE = cast<DeclRefExpr>(E);
2735 const auto *VD = cast<VarDecl>(DE->getDecl());
2736
2737 // Skip all but globals.
2738 if (!VD->hasGlobalStorage())
2739 continue;
2740
2741 // Check if the global has been materialized yet or not. If not, we are done
2742 // as any later generation will utilize the OMPAllocateDeclAttr. However, if
2743 // we already emitted the global we might have done so before the
2744 // OMPAllocateDeclAttr was attached, leading to the wrong address space
2745     // (potentially). While not pretty, common practice is to remove the old IR
2746 // global and generate a new one, so we do that here too. Uses are replaced
2747 // properly.
2748 StringRef MangledName = getMangledName(VD);
2749 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
2750 if (!Entry)
2751 continue;
2752
2753     // We can also keep the existing global if the address space is what we
2754     // expect it to be; if not, it is replaced.
2755 QualType ASTTy = VD->getType();
2756 clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
2757 auto TargetAS = getContext().getTargetAddressSpace(GVAS);
2758 if (Entry->getType()->getAddressSpace() == TargetAS)
2759 continue;
2760
2761 // Make a new global with the correct type / address space.
2762 llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
2763 llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);
2764
2765 // Replace all uses of the old global with a cast. Since we mutate the type
2766     // in place we need an intermediate that takes the spot of the old entry
2767 // until we can create the cast.
2768 llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
2769 getModule(), Entry->getValueType(), false,
2770 llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
2771 llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
2772 Entry->replaceAllUsesWith(DummyGV);
2773
2774 Entry->mutateType(PTy);
2775 llvm::Constant *NewPtrForOldDecl =
2776 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2777 Entry, DummyGV->getType());
2778
2779 // Now we have a casted version of the changed global, the dummy can be
2780 // replaced and deleted.
2781 DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
2782 DummyGV->eraseFromParent();
2783 }
2784 }
2785
2786 std::optional<CharUnits>
2787 CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
2788 if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
2789 if (Expr *Alignment = AA->getAlignment()) {
2790 unsigned UserAlign =
2791 Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
2792 CharUnits NaturalAlign =
2793 getNaturalTypeAlignment(VD->getType().getNonReferenceType());
2794
2795 // OpenMP5.1 pg 185 lines 7-10
2796 // Each item in the align modifier list must be aligned to the maximum
2797 // of the specified alignment and the type's natural alignment.
2798 return CharUnits::fromQuantity(
2799 std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
2800 }
2801 }
2802 return std::nullopt;
2803 }
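// Worked example of the rule above, for illustration: with
//   #pragma omp allocate(x) align(16)
// where x is a double (naturally 8-byte aligned on typical targets), the
// returned alignment is max(16, 8) == 16 bytes; an align(4) request on the
// same variable would still yield 8 bytes.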
2804