//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::ClassScopeFunctionSpecialization:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum: // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::MSGuid: // __declspec(uuid("..."))
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still
  // need to be emitted like static variables, e.g. a function-scope
  // variable in the constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}

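/// Compute a name for a function-local static variable. In C++ this is the
/// mangled name; otherwise it is built from the enclosing function, block, or
/// Objective-C method plus the variable's source name.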
static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getValueType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(
        CGM.getModule(), Init->getType(), OldGV->isConstant(),
        OldGV->getLinkage(), Init, "",
        /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
        OldGV->getType()->getPointerAddressSpace());
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global.
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global.
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setConstant(CGM.isTypeConstant(D.getType(), true));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (D.needsDestruction(getContext()) == QualType::DK_cxx_destructor &&
      HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  setAddrOfLocalVar(&D, Address(addr, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such a variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  if (var != castedAddr)
    LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

namespace {
struct DestroyObject final : EHScopeStack::Cleanup {
  DestroyObject(Address addr, QualType type,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

  Address addr;
  QualType type;
  CodeGenFunction::Destroyer *destroyer;
  bool useEHCleanupForArray;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Don't use an EH cleanup recursively from an EH cleanup.
    bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

    CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
  }
};

template <class Derived>
struct DestroyNRVOVariable : EHScopeStack::Cleanup {
  DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
      : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

  llvm::Value *NRVOFlag;
  Address Loc;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Along the exceptions path we always execute the dtor.
    bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

    llvm::BasicBlock *SkipDtorBB = nullptr;
    if (NRVO) {
      // If we exited via NRVO, we skip the destructor call.
      llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
      SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
      llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
      CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
      CGF.EmitBlock(RunDtorBB);
    }

    static_cast<Derived *>(this)->emitDestructorCall(CGF);

    if (NRVO) CGF.EmitBlock(SkipDtorBB);
  }

  virtual ~DestroyNRVOVariable() = default;
};

struct DestroyNRVOVariableCXX final
    : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
  DestroyNRVOVariableCXX(Address addr, QualType type,
                         const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
      : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
        Dtor(Dtor) {}

  const CXXDestructorDecl *Dtor;

  void emitDestructorCall(CodeGenFunction &CGF) {
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false, Loc, Ty);
  }
};

struct DestroyNRVOVariableC final
    : DestroyNRVOVariable<DestroyNRVOVariableC> {
  DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
      : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

  void emitDestructorCall(CodeGenFunction &CGF) {
    CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
  }
};

struct CallStackRestore final : EHScopeStack::Cleanup {
  Address Stack;
  CallStackRestore(Address Stack) : Stack(Stack) {}
  bool isRedundantBeforeReturn() override { return true; }
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *V = CGF.Builder.CreateLoad(Stack);
    llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    CGF.Builder.CreateCall(F, V);
  }
};

struct ExtendGCLifetime final : EHScopeStack::Cleanup {
  const VarDecl &Var;
  ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Compute the address of the local variable, in case it's a
    // byref or something.
    DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                    Var.getType(), VK_LValue, SourceLocation());
    llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                              SourceLocation());
    CGF.EmitExtendGCLifetime(value);
  }
};

struct CallCleanupFunction final : EHScopeStack::Cleanup {
  llvm::Constant *CleanupFn;
  const CGFunctionInfo &FnInfo;
  const VarDecl &Var;

  CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                      const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                    Var.getType(), VK_LValue, SourceLocation());
    // Compute the address of the local variable, in case it's a byref
    // or something.
    llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //   void f(void* arg);
    //   __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = FnInfo.arg_begin()->type;
    llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

    CallArgList Args;
    Args.add(RValue::get(Arg),
             CGF.getContext().getPointerType(Var.getType()));
    auto Callee = CGCallee::forDirect(CleanupFn);
    CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
  }
};
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

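/// Check whether the given statement or expression tree references 'var',
/// either directly through a DeclRefExpr or via capture in a block literal.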
static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. for a missing decl or the condition of an
    // if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

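/// Try to emit the initialization of a __weak variable as a copy or move from
/// another __weak lvalue or xvalue, looking through casts that don't change
/// the representation.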
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress(CGF);
      if (needsCast) {
        srcAddr = CGF.Builder.CreateElementBitCast(
            srcAddr, destLV.getAddress(CGF).getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}

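/// Adjust an lvalue for a __block variable so that it refers to the object
/// inside the byref structure rather than the original allocation.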
static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability(getContext());
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // Check if the right hand side of the assignment is nonnull, if the left
  // hand side must be nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty =
        cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(*this), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means we omit the retain, which causes non-autoreleased return values
    // to be released immediately.
    LLVM_FALLTHROUGH;
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(*this), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder,
                                        bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder, IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder, IsAutoInit);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is larger,
  // use bzero plus stores only if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided to
/// not use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
///       memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

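/// Whether padding and undef bytes should be filled with a repeating byte
/// pattern or with zeroes when rewriting constants below.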
enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    uint64_t Size = ArrayTy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = ArrayTy->getElementType();
    bool ZeroInitializer = constant->isNullValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
    return llvm::ConstantArray::get(NewArrayTy, Values);
  }
  // FIXME: Add handling for tail padding in vectors. Vectors don't
  // have padding between or inside elements, but the total amount of
  // data can be less than the allocated size.
  return constant;
}

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return std::string(getMangledName(FD));
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < Align.getQuantity()) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, Align);
}

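/// Get the unnamed constant global for 'D' and cast it to an i8 pointer in
/// the right address space, for use as a memcpy source.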
static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
                                                   SrcPtr.getAddressSpace());
  if (SrcPtr.getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
  return SrcPtr;
}

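/// Store the given constant into Loc, choosing the cheapest strategy: a single
/// scalar store, bzero or memset plus fix-up stores, per-field stores, or a
/// memcpy from an unnamed constant global.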
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant, bool IsAutoInit) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    auto *I = Builder.CreateStore(constant, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
                                   SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
                                  IsAutoInit);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    auto *I = Builder.CreateMemSet(
        Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  // If the initializer is small, use a handful of stores.
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      // FIXME: handle the case when STy != Loc.getElementType().
      if (STy == Loc.getElementType()) {
        for (unsigned i = 0; i != constant->getNumOperands(); i++) {
          Address EltPtr = Builder.CreateStructGEP(Loc, i);
          emitStoresForConstant(
              CGM, D, EltPtr, isVolatile, Builder,
              cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
              IsAutoInit);
        }
        return;
      }
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      // FIXME: handle the case when ATy != Loc.getElementType().
      if (ATy == Loc.getElementType()) {
        for (unsigned i = 0; i != ATy->getNumElements(); i++) {
          Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
          emitStoresForConstant(
              CGM, D, EltPtr, isVolatile, Builder,
              cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
              IsAutoInit);
        }
        return;
      }
    }
  }

  // Copy from a global.
  auto *I =
      Builder.CreateMemCpy(Loc,
                           createUnnamedGlobalForMemcpyFrom(
                               CGM, D, Builder, constant, Loc.getAlignment()),
                           SizeVal, isVolatile);
  if (IsAutoInit)
    I->addAnnotationMetadata("auto-init");
}

static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                     Address Loc, bool isVolatile,
                                     CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

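/// Check recursively whether a constant contains any undef values, including
/// inside nested structs, arrays, and vectors.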
static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

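/// Rewrite a constant so that any undef values it contains are replaced with
/// either a pattern or zeroes, recursing into aggregates.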
static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp;
       ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
                                                llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::Value *SizeV = llvm::ConstantInt::get(
      Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}

void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension, store its QualType and the corresponding
  // size-expression Value.
1356 SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1357 SmallVector<IdentifierInfo *, 4> VLAExprNames;
1358
1359 // Break down the array into individual dimensions.
1360 QualType Type1D = D.getType();
1361 while (getContext().getAsVariableArrayType(Type1D)) {
1362 auto VlaSize = getVLAElements1D(Type1D);
1363 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1364 Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1365 else {
1366 // Generate a locally unique name for the size expression.
1367 Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1368 SmallString<12> Buffer;
1369 StringRef NameRef = Name.toStringRef(Buffer);
1370 auto &Ident = getContext().Idents.getOwn(NameRef);
1371 VLAExprNames.push_back(&Ident);
1372 auto SizeExprAddr =
1373 CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1374 Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1375 Dimensions.emplace_back(SizeExprAddr.getPointer(),
1376 Type1D.getUnqualifiedType());
1377 }
1378 Type1D = VlaSize.Type;
1379 }
1380
1381 if (!EmitDebugInfo)
1382 return;
1383
1384 // Register each dimension's size-expression with a DILocalVariable,
1385 // so that it can be used by CGDebugInfo when instantiating a DISubrange
1386 // to describe this array.
1387 unsigned NameIdx = 0;
1388 for (auto &VlaSize : Dimensions) {
1389 llvm::Metadata *MD;
1390 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1391 MD = llvm::ConstantAsMetadata::get(C);
1392 else {
1393 // Create an artificial VarDecl to generate debug info for.
1394 IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1395 auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
1396 auto QT = getContext().getIntTypeForBitwidth(
1397 VlaExprTy->getScalarSizeInBits(), false);
1398 auto *ArtificialDecl = VarDecl::Create(
1399 getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1400 D.getLocation(), D.getLocation(), NameIdent, QT,
1401 getContext().CreateTypeSourceInfo(QT), SC_Auto);
1402 ArtificialDecl->setImplicit();
1403
1404 MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1405 Builder);
1406 }
1407 assert(MD && "No Size expression debug node created");
1408 DI->registerVLASizeExpression(VlaSize.Type, MD);
1409 }
1410 }
1411
/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable. Does not emit initialization or destruction.
CodeGenFunction::AutoVarEmission
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
  QualType Ty = D.getType();
  assert(
      Ty.getAddressSpace() == LangAS::Default ||
      (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));

  AutoVarEmission emission(D);

  bool isEscapingByRef = D.isEscapingByref();
  emission.IsEscapingByRef = isEscapingByRef;

  CharUnits alignment = getContext().getDeclAlign(&D);

  // If the type is variably-modified, emit all the VLA sizes for it.
  if (Ty->isVariablyModifiedType())
    EmitVariablyModifiedType(Ty);

  auto *DI = getDebugInfo();
  bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();

  Address address = Address::invalid();
  Address AllocaAddr = Address::invalid();
  Address OpenMPLocalAddr = Address::invalid();
  if (CGM.getLangOpts().OpenMPIRBuilder)
    OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
  else
    OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();

  bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();

  if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
    address = OpenMPLocalAddr;
  } else if (Ty->isConstantSizeType()) {
    // If this value is an array or struct with a statically determinable
    // constant initializer, there are optimizations we can do.
    //
    // TODO: We should constant-evaluate the initializer of any variable,
    // as long as it is initialized by a constant expression. Currently,
    // isConstantInitializer produces wrong answers for structs with
    // reference or bitfield members, and a few other cases, and checking
    // for POD-ness protects us from some of these.
    if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
        (D.isConstexpr() ||
         ((Ty.isPODType(getContext()) ||
           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
          D.getInit()->isConstantInitializer(getContext(), false)))) {

      // If the variable is of const type, is neither an NRVO candidate nor
      // a __block variable, and has no mutable members, emit it as a global
      // instead. The exception is a variable located in a non-constant
      // address space in OpenCL.
      if ((!getLangOpts().OpenCL ||
           Ty.getAddressSpace() == LangAS::opencl_constant) &&
          (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
           !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
        EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);

        // Signal this condition to later callbacks.
        emission.Addr = Address::invalid();
        assert(emission.wasEmittedAsGlobal());
        return emission;
      }

      // Otherwise, tell the initialization code that we're in this case.
      emission.IsConstantAggregate = true;
    }

    // A normal fixed sized variable becomes an alloca in the entry block,
    // unless:
    // - it's an NRVO variable.
    // - we are compiling OpenMP and it's an OpenMP local variable.
    if (NRVO) {
      // The named return value optimization: allocate this variable in the
      // return slot, so that we can elide the copy when returning this
      // variable (C++0x [class.copy]p34).
      address = ReturnValue;

      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
        const auto *RD = RecordTy->getDecl();
        const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
        if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
            RD->isNonTrivialToPrimitiveDestroy()) {
          // Create a flag that is used to indicate when the NRVO was applied
          // to this variable. Set it to zero to indicate that NRVO was not
          // applied.
          llvm::Value *Zero = Builder.getFalse();
          Address NRVOFlag =
              CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
          EnsureInsertPoint();
          Builder.CreateStore(Zero, NRVOFlag);

          // Record the NRVO flag for this variable.
          NRVOFlags[&D] = NRVOFlag.getPointer();
          emission.NRVOFlag = NRVOFlag.getPointer();
        }
      }
    } else {
      CharUnits allocaAlignment;
      llvm::Type *allocaTy;
      if (isEscapingByRef) {
        auto &byrefInfo = getBlockByrefInfo(&D);
        allocaTy = byrefInfo.Type;
        allocaAlignment = byrefInfo.ByrefAlignment;
      } else {
        allocaTy = ConvertTypeForMem(Ty);
        allocaAlignment = alignment;
      }

      // Create the alloca. Note that we set the name separately from
      // building the instruction so that it's there even in no-asserts
      // builds.
      address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
                                 /*ArraySize=*/nullptr, &AllocaAddr);

      // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
      // the catch parameter starts in the catchpad instruction, and we can't
      // insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's no point in doing
      // this if we don't have a valid insertion point.
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        // PR28267.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin as
        // soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again, this
        // is rare.
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(Size, AllocaAddr.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      Address Stack =
          CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");

      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      // FIXME: in general circumstances, this should be an EH cleanup.
      pushStackRestore(NormalCleanup, Stack);
    }

    auto VlaSize = getVLASize(Ty);
    llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

    // Allocate memory for the array.
    address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
                               &AllocaAddr);

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }

  setAddrOfLocalVar(&D, address);
  emission.Addr = address;
  emission.AllocaAddr = AllocaAddr;

  // Emit debug info for local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    Address DebugAddr = address;
    bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
    DI->setLocation(D.getLocation());

    // If NRVO, use a pointer to the return address.
    if (UsePointerValue)
      DebugAddr = ReturnValuePointer;

    (void)DI->EmitDeclareOfAutoVariable(&D, DebugAddr.getPointer(), Builder,
                                        UsePointerValue);
  }

  if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
    EmitVarAnnotations(&D, address.getPointer());

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                         emission.getOriginalAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());

  return emission;
}

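// Summary sketch of the allocation strategies above (illustrative, not
// exhaustive):
//   int x;                         -> alloca i32 in the entry block
//   const int t[4] = {1,2,3,4};    -> may become an internal global
//                                     (MergeAllConstants, const, non-NRVO)
//   big_t v; ...; return v;        -> NRVO: reuses the return slot, plus an
//                                     i1 "nrvo" flag if a destructor runs
//   int vla[n];                    -> llvm.stacksave, then a "vla" alloca
//                                     sized by %n, restored via a cleanup
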
static bool isCapturedBy(const VarDecl &, const Expr *);

/// Determines whether the given __block variable is potentially
/// captured by the given statement.
static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
  if (const Expr *E = dyn_cast<Expr>(S))
    return isCapturedBy(Var, E);
  for (const Stmt *SubStmt : S->children())
    if (isCapturedBy(Var, SubStmt))
      return true;
  return false;
}

/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
  // Skip the most common kinds of expressions that make
  // hierarchy-walking expensive.
  E = E->IgnoreParenCasts();

  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
    const BlockDecl *Block = BE->getBlockDecl();
    for (const auto &I : Block->captures()) {
      if (I.getVariable() == &Var)
        return true;
    }

    // No need to walk into the subexpressions.
    return false;
  }

  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
    const CompoundStmt *CS = SE->getSubStmt();
    for (const auto *BI : CS->body()) {
      if (const auto *BIE = dyn_cast<Expr>(BI)) {
        if (isCapturedBy(Var, BIE))
          return true;
      } else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
        // Special case declarations: check their initializers.
        for (const auto *I : DS->decls()) {
          if (const auto *VD = dyn_cast<VarDecl>(I)) {
            const Expr *Init = VD->getInit();
            if (Init && isCapturedBy(Var, Init))
              return true;
          }
        }
      } else {
        // FIXME: Conservatively assume that arbitrary statements cause
        // capturing. Later, provide code to poke into statements for
        // capture analysis.
        return true;
      }
    }
    return false;
  }

  for (const Stmt *SubStmt : E->children())
    if (isCapturedBy(Var, SubStmt))
      return true;

  return false;
}

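// Illustrative case this analysis catches: a __block variable captured by a
// block inside its own initializer, e.g.
//   __block int x = ^{ return x; }();
// The byref structure may be moved by the capture, so EmitAutoVarInit emits
// the initializer first and only then copies the result into the variable.
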
/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
  if (!Init)
    return true;

  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
      if (Constructor->isTrivial() &&
          Constructor->isDefaultConstructor() &&
          !Construct->requiresZeroInitialization())
        return true;

  return false;
}

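// For example (sketch): given
//   struct P { int x, y; };
//   P p;   // trivial default construction -> no initialization code
// only the alloca is emitted; a construction that requires
// zero-initialization (e.g. value-initialization) is not trivial here.
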
void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
                                                      const VarDecl &D,
                                                      Address Loc) {
  auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
  CharUnits Size = getContext().getTypeSizeInChars(type);
  bool isVolatile = type.isVolatileQualified();
  if (!Size.isZero()) {
    switch (trivialAutoVarInit) {
    case LangOptions::TrivialAutoVarInitKind::Uninitialized:
      llvm_unreachable("Uninitialized handled by caller");
    case LangOptions::TrivialAutoVarInitKind::Zero:
      if (CGM.stopAutoInit())
        return;
      emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
      break;
    case LangOptions::TrivialAutoVarInitKind::Pattern:
      if (CGM.stopAutoInit())
        return;
      emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
      break;
    }
    return;
  }

  // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
  // them, so emit a memcpy with the VLA size to initialize each element.
  // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
  // will catch that code, but there exists code which generates zero-sized
  // VLAs. Be nice and initialize whatever they requested.
  const auto *VlaType = getContext().getAsVariableArrayType(type);
  if (!VlaType)
    return;
  auto VlaSize = getVLASize(VlaType);
  auto SizeVal = VlaSize.NumElts;
  CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
  switch (trivialAutoVarInit) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    llvm_unreachable("Uninitialized handled by caller");

  case LangOptions::TrivialAutoVarInitKind::Zero: {
    if (CGM.stopAutoInit())
      return;
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
                                   SizeVal, isVolatile);
    I->addAnnotationMetadata("auto-init");
    break;
  }

  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    if (CGM.stopAutoInit())
      return;
    llvm::Type *ElTy = Loc.getElementType();
    llvm::Constant *Constant = constWithPadding(
        CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
    CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
    llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
    llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
    llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
    llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
        SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
        "vla.iszerosized");
    Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
    EmitBlock(SetupBB);
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    llvm::Value *BaseSizeInChars =
        llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
    Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
    llvm::Value *End = Builder.CreateInBoundsGEP(
        Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
    llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
    EmitBlock(LoopBB);
    llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
    Cur->addIncoming(Begin.getPointer(), OriginBB);
    CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
    auto *I =
        Builder.CreateMemCpy(Address(Cur, CurAlign),
                             createUnnamedGlobalForMemcpyFrom(
                                 CGM, D, Builder, Constant, ConstantAlign),
                             BaseSizeInChars, isVolatile);
    I->addAnnotationMetadata("auto-init");
    llvm::Value *Next =
        Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
    llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
    Builder.CreateCondBr(Done, ContBB, LoopBB);
    Cur->addIncoming(Next, LoopBB);
    EmitBlock(ContBB);
  } break;
  }
}

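// Behavior sketch for -ftrivial-auto-var-init on a VLA (block names as
// created above):
//   void f(unsigned n) { int vla[n]; }
// With =zero this lowers to a memset of n * sizeof(int) bytes annotated
// "auto-init"; with =pattern it emits the vla-setup.loop / vla-init.loop
// pair that memcpy's a pattern constant into each element, skipping the
// loop entirely when n == 0.
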
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsEscapingByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have an initializer and it is
  // a C struct that is non-trivial to initialize or an array containing such
  // a struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsEscapingByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer. If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit =
      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);

  bool locIsByrefHeader = !capturedByInit;
  const Address Loc =
      locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;

  // Note: constexpr already initializes everything correctly.
  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
      (D.isConstexpr()
           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
           : (D.getAttr<UninitializedAttr>()
                  ? LangOptions::TrivialAutoVarInitKind::Uninitialized
                  : getContext().getLangOpts().getTrivialAutoVarInit()));

  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
    if (trivialAutoVarInit ==
        LangOptions::TrivialAutoVarInitKind::Uninitialized)
      return;

    // Only initialize a __block's storage: we always initialize the header.
    if (emission.IsEscapingByRef && !locIsByrefHeader)
      Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);

    return emitZeroOrPatternForAutoVarInit(type, D, Loc);
  };

  if (isTrivialInitializer(Init))
    return initializeWhatIsTechnicallyUninitialized(Loc);

  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate ||
      D.mightBeUsableInConstantExpressions(getContext())) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
    if (constant && !constant->isZeroValue() &&
        (trivialAutoVarInit !=
         LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
      IsPattern isPattern =
          (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
              ? IsPattern::Yes
              : IsPattern::No;
      // C guarantees that brace-init with fewer initializers than members in
      // the aggregate will initialize the rest of the aggregate as-if it were
      // static initialization. In turn static initialization guarantees that
      // padding is initialized to zero bits. We could instead pattern-init if
      // D has any ImplicitValueInitExpr, but that seems to be unintuitive
      // behavior.
      constant = constWithPadding(CGM, IsPattern::No,
                                  replaceUndef(CGM, isPattern, constant));
    }
  }

  if (!constant) {
    initializeWhatIsTechnicallyUninitialized(Loc);
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
  }

  llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
  emitStoresForConstant(
      CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
      type.isVolatileQualified(), Builder, constant, /*IsAutoInit=*/false);
}

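// Dispatch sketch for the paths above (illustrative):
//   int a[4] = {1, 2, 3, 4};  // constant aggregate -> stores/memcpy from a
//                             // private unnamed constant global
//   int x = 42;               // constant scalar -> a single direct store
//   int y = g();              // non-constant -> EmitExprAsInit, preceded by
//                             // zero/pattern auto-init when enabled
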
/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location. The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
/// \param lvalue the lvalue to initialize
/// \param capturedByInit true if \p D is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, true);
    return;
  }
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr*>(init), lvalue);
    } else {
      AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
      if (isa<VarDecl>(D))
        Overlap = AggValueSlot::DoesNotOverlap;
      else if (auto *FD = dyn_cast<FieldDecl>(D))
        Overlap = getOverlapForFieldInit(FD);
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init, AggValueSlot::forLValue(
                            lvalue, *this, AggValueSlot::IsDestructed,
                            AggValueSlot::DoesNotNeedGCBarriers,
                            AggValueSlot::IsNotAliased, Overlap));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

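// Evaluation-kind sketch for the switch above (illustrative):
//   int &r = x;           -> reference binding, then a pointer store
//   double d = e;         -> TEK_Scalar: EmitScalarInit
//   _Complex float c = e; -> TEK_Complex: real/imag stored as a pair
//   struct S s = e;       -> TEK_Aggregate: EmitAggExpr into the slot
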
/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
    const CodeGenFunction::AutoVarEmission &emission,
    QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
                                                  emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    break;

  case QualType::DK_nontrivial_c_struct:
    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
                                                emission.NRVOFlag, type);
      return;
    }
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}

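// NRVO-flag sketch: when the variable lives in the return slot, the pushed
// cleanup loads the "nrvo" flag and only destroys the object if NRVO did not
// actually fire, roughly:
//   %nrvo.val = load i1, i1* %nrvo
//   br i1 %nrvo.val, label %skip.dtor, label %run.dtor
// (Block names here are illustrative; the real ones come from the cleanup
// class implementation.)
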
void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done. Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
  // mode.
  if (emission.IsEscapingByRef &&
      CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
    BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
    if (emission.Variable->getType().isObjCGCWeak())
      Flags |= BLOCK_FIELD_IS_WEAK;
    enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
                      /*LoadBlockVarAddr*/ false,
                      cxxDestructorCanThrow(emission.Variable->getType()));
  }
}

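// Cleanup-attribute sketch (names hypothetical): for
//   void closefile(FILE **fp);
//   FILE *f __attribute__((cleanup(closefile))) = fopen(path, "r");
// a NormalAndEHCleanup is pushed that calls closefile(&f) when the scope
// exits, on both the normal and the exceptional path.
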
CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  case QualType::DK_nontrivial_c_struct:
    return destroyNonTrivialCStruct;
  }
  llvm_unreachable("Unknown DestructionKind");
}

/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}

/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                  Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
              cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                  QualType type, Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
                                     destroyer, useEHCleanupForArray);
}

void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}

void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
                                                  Address addr, QualType type,
                                                  Destroyer *destroyer,
                                                  bool useEHCleanupForArray) {
  // If we're not in a conditional branch, we don't need to bother generating a
  // conditional cleanup.
  if (!isInConditionalBranch()) {
    // Push an EH-only cleanup for the object now.
    // FIXME: When popping normal cleanups, we need to keep this EH cleanup
    // around in case a temporary's destructor throws an exception.
    if (cleanupKind & EHCleanup)
      EHStack.pushCleanup<DestroyObject>(
          static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
          destroyer, useEHCleanupForArray);

    return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
        cleanupKind, Address::invalid(), addr, type, destroyer,
        useEHCleanupForArray);
  }

  // Otherwise, we should only destroy the object if it's been initialized.
  // Re-use the active flag and saved address across both the EH and end of
  // scope cleanups.

  using SavedType = typename DominatingValue<Address>::saved_type;
  using ConditionalCleanupType =
      EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
                                       Destroyer *, bool>;

  Address ActiveFlag = createCleanupActiveFlag();
  SavedType SavedAddr = saveValueInCond(addr);

  if (cleanupKind & EHCleanup) {
    EHStack.pushCleanup<ConditionalCleanupType>(
        static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr, type,
        destroyer, useEHCleanupForArray);
    initFullExprCleanupWithFlag(ActiveFlag);
  }

  pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
      cleanupKind, ActiveFlag, SavedAddr, type, destroyer,
      useEHCleanupForArray);
}

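// Lifetime-extension sketch: this is the path taken for temporaries whose
// lifetime is extended past the full-expression, e.g.
//   const T &r = cond ? T(a) : T(b);
// When the temporary is created inside a conditional branch, its destructor
// must only run if that branch actually constructed it, hence the active
// flag and the saved (conditionally dominated) address.
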
/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
///   objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
///   elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
///   used when destroying array elements, in case one of the
///   destructions throws an exception
void CodeGenFunction::emitDestroy(Address addr, QualType type,
                                  Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  const ArrayType *arrayType = getContext().getAsArrayType(type);
  if (!arrayType)
    return destroyer(*this, addr, type);

  llvm::Value *length = emitArrayLength(arrayType, type, addr);

  CharUnits elementAlign =
      addr.getAlignment()
          .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));

  // Normally we have to check whether the array is zero-length.
  bool checkZeroLength = true;

  // But if the array length is constant, we can suppress that.
  if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
    // ...and if it's constant zero, we can just skip the entire thing.
    if (constLength->isZero()) return;
    checkZeroLength = false;
  }

  llvm::Value *begin = addr.getPointer();
  llvm::Value *end =
      Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
  emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                   checkZeroLength, useEHCleanupForArray);
}

/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first. The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  llvm::PHINode *elementPast =
      Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Value *element = Builder.CreateInBoundsGEP(
      elementPast->getType()->getPointerElementType(), elementPast, negativeOne,
      "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, elementAlign), elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}

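// Loop-shape sketch (using the block names created above); elements are
// destroyed in reverse order:
//   arraydestroy.body:
//     %elementPast = phi [ %end, %entry ], [ %element, %arraydestroy.body ]
//     %element = gep %elementPast, -1
//     call dtor(%element)
//     %done = icmp eq %element, %begin
//     br i1 %done, label %arraydestroy.done, label %arraydestroy.body
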
/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value *, 4> gepIndices(arrayDepth + 1, zero);
    llvm::Type *elemTy = begin->getType()->getPointerElementType();
    begin = CGF.Builder.CreateInBoundsGEP(
        elemTy, begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(
        elemTy, end, gepIndices, "pad.arrayend");
  }

  // Destroy the array. We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}

namespace {
  /// RegularPartialArrayDestroy - a cleanup which performs a partial
  /// array destroy where the end pointer is regularly determined and
  /// does not need to be loaded from a local.
  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    llvm::Value *ArrayEnd;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
                               QualType elementType, CharUnits elementAlign,
                               CodeGenFunction::Destroyer *destroyer)
        : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
          ElementType(elementType), Destroyer(destroyer),
          ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };

  /// IrregularPartialArrayDestroy - a cleanup which performs a
  /// partial array destroy where the end pointer is irregularly
  /// determined and must be loaded from a local.
  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    Address ArrayEndPointer;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
                                 Address arrayEndPointer,
                                 QualType elementType,
                                 CharUnits elementAlign,
                                 CodeGenFunction::Destroyer *destroyer)
        : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
          ElementType(elementType), Destroyer(destroyer),
          ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };
} // end anonymous namespace

/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
                                                    arrayBegin, arrayEndPointer,
                                                    elementType, elementAlign,
                                                    destroyer);
}

/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
                                                  arrayBegin, arrayEnd,
                                                  elementType, elementAlign,
                                                  destroyer);
}

/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
  if (LifetimeStartFn)
    return LifetimeStartFn;
  LifetimeStartFn = llvm::Intrinsic::getDeclaration(
      &getModule(), llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
  return LifetimeStartFn;
}

/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
  if (LifetimeEndFn)
    return LifetimeEndFn;
  LifetimeEndFn = llvm::Intrinsic::getDeclaration(
      &getModule(), llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
  return LifetimeEndFn;
}

namespace {
  /// A cleanup to perform a release of an object at the end of a
  /// function. This is used to balance out the incoming +1 of a
  /// ns_consumed argument when we can't reasonably do that just by
  /// not doing the initial retain for a __block argument.
  struct ConsumeARCParameter final : EHScopeStack::Cleanup {
    ConsumeARCParameter(llvm::Value *param,
                        ARCPreciseLifetime_t precise)
        : Param(param), Precise(precise) {}

    llvm::Value *Param;
    ARCPreciseLifetime_t Precise;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitARCRelease(Param, Precise);
    }
  };
} // end anonymous namespace

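// ns_consumed sketch: for a parameter declared as
//   void take(__attribute__((ns_consumed)) id obj);
// the callee owns the incoming +1. When the initial retain cannot simply be
// skipped, this cleanup releases the parameter at function exit to balance
// the transferred reference.
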
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // This may be passed as an inalloca'ed value on Windows x86.
    if (BlockInfo) {
      llvm::Value *V = Arg.isIndirect()
                           ? Builder.CreateLoad(Arg.getIndirectAddress())
                           : Arg.getDirectValue();
      setBlockContextParameter(IPD, ArgNo, V);
      return;
    }
  }

  Address DeclPtr = Address::invalid();
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    DeclPtr = Arg.getIndirectAddress();
    // If we have a prettier pointer type at this point, bitcast to that.
    unsigned AS = DeclPtr.getType()->getAddressSpace();
    llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
    if (DeclPtr.getType() != IRTy)
      DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
    // An indirect argument is in the alloca address space, which may differ
    // from the default address space.
    auto AllocaAS = CGM.getASTAllocaAddressSpace();
    auto *V = DeclPtr.getPointer();
    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
    auto DestLangAS =
        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
    if (SrcLangAS != DestLangAS) {
      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
             CGM.getDataLayout().getAllocaAddrSpace());
      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
      auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
      DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
                            *this, V, SrcLangAS, DestLangAS, T, true),
                        DeclPtr.getAlignment());
    }

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (Ty->isRecordType() && !CurFuncIsThunk &&
        Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
      if (QualType::DestructionKind DtorKind =
              D.needsDestruction(getContext())) {
        assert((DtorKind == QualType::DK_cxx_destructor ||
                DtorKind == QualType::DK_nontrivial_c_struct) &&
               "unexpected destructor type");
        pushDestroy(DtorKind, DeclPtr, Ty);
        CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
            EHStack.stable_begin();
      }
    }
  } else {
    // Check if the parameter address is controlled by the OpenMP runtime.
    Address OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();
    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
      DeclPtr = OpenMPLocalAddr;
    } else {
      // Otherwise, create a temporary to hold the value.
      DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                              D.getName() + ".addr");
    }
    DoStore = true;
  }

  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // If a parameter is pseudo-strong then we can omit the implicit retain.
      if (D.isARCPseudoStrong()) {
        assert(lt == Qualifiers::OCL_Strong &&
               "pseudo-strong variable isn't strong?");
        assert(qs.hasConst() && "pseudo-strong variable should be const!");
        lt = Qualifiers::OCL_ExplicitNone;
      }

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // Use objc_storeStrong(&dest, value) to retain the object. But
            // first, store a null into 'dest' because objc_storeStrong
            // attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
            DoStore = false;
          } else {
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
          }
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
                                              ? ARCPreciseLifetime
                                              : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  setAddrOfLocalVar(&D, DeclPtr);

  // Emit debug info for param declarations in non-thunk functions.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk) {
      llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
          &D, DeclPtr.getPointer(), ArgNo, Builder);
      if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
        DI->getParamDbgMappings().insert({Var, DILocalVar});
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.getPointer());

  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}

void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}

void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
                                         CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
      (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
}

void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().processRequiresDirective(D);
}

void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
  for (const Expr *E : D->varlists()) {
    const auto *DE = cast<DeclRefExpr>(E);
    const auto *VD = cast<VarDecl>(DE->getDecl());

    // Skip all but globals.
    if (!VD->hasGlobalStorage())
      continue;

    // Check if the global has been materialized yet or not. If not, we are
    // done as any later generation will utilize the OMPAllocateDeclAttr.
    // However, if we already emitted the global we might have done so before
    // the OMPAllocateDeclAttr was attached, potentially leading to the wrong
    // address space. While not pretty, common practice is to remove the old
    // IR global and generate a new one, so we do that here too. Uses are
    // replaced properly.
    StringRef MangledName = getMangledName(VD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;

    // We can also keep the existing global if the address space is what we
    // expect it to be; if not, it is replaced.
    QualType ASTTy = VD->getType();
    clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
    auto TargetAS = getContext().getTargetAddressSpace(GVAS);
    if (Entry->getType()->getAddressSpace() == TargetAS)
      continue;

    // Make a new global with the correct type / address space.
    llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
    llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);

    // Replace all uses of the old global with a cast. Since we mutate the type
    // in place, we need an intermediate that takes the spot of the old entry
    // until we can create the cast.
    llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
        getModule(), Entry->getValueType(), false,
        llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
        llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
    Entry->replaceAllUsesWith(DummyGV);

    Entry->mutateType(PTy);
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            Entry, DummyGV->getType());

    // Now that we have a casted version of the changed global, the dummy can
    // be replaced and deleted.
    DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
    DummyGV->eraseFromParent();
  }
}

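// Directive sketch that exercises the replacement path above (allocator name
// is one of the predefined OpenMP allocators; the global name is
// hypothetical):
//   int g;
//   #pragma omp allocate(g) allocator(omp_cgroup_mem_alloc)
// If 'g' was materialized before the allocate directive attached its
// OMPAllocateDeclAttr, the existing IR global may sit in the wrong address
// space and is swapped out as described above.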