1 //===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Decl nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "CGBlocks.h"
14 #include "CGCXXABI.h"
15 #include "CGCleanup.h"
16 #include "CGDebugInfo.h"
17 #include "CGOpenCLRuntime.h"
18 #include "CGOpenMPRuntime.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "ConstantEmitter.h"
22 #include "PatternInit.h"
23 #include "TargetInfo.h"
24 #include "clang/AST/ASTContext.h"
25 #include "clang/AST/Attr.h"
26 #include "clang/AST/CharUnits.h"
27 #include "clang/AST/Decl.h"
28 #include "clang/AST/DeclObjC.h"
29 #include "clang/AST/DeclOpenMP.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/SourceManager.h"
32 #include "clang/Basic/TargetInfo.h"
33 #include "clang/CodeGen/CGFunctionInfo.h"
34 #include "clang/Sema/Sema.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/GlobalVariable.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/IR/Type.h"
40
41 using namespace clang;
42 using namespace CodeGen;
43
44 static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
45 "Clang max alignment greater than what LLVM supports?");
46
47 void CodeGenFunction::EmitDecl(const Decl &D) {
48 switch (D.getKind()) {
49 case Decl::BuiltinTemplate:
50 case Decl::TranslationUnit:
51 case Decl::ExternCContext:
52 case Decl::Namespace:
53 case Decl::UnresolvedUsingTypename:
54 case Decl::ClassTemplateSpecialization:
55 case Decl::ClassTemplatePartialSpecialization:
56 case Decl::VarTemplateSpecialization:
57 case Decl::VarTemplatePartialSpecialization:
58 case Decl::TemplateTypeParm:
59 case Decl::UnresolvedUsingValue:
60 case Decl::NonTypeTemplateParm:
61 case Decl::CXXDeductionGuide:
62 case Decl::CXXMethod:
63 case Decl::CXXConstructor:
64 case Decl::CXXDestructor:
65 case Decl::CXXConversion:
66 case Decl::Field:
67 case Decl::MSProperty:
68 case Decl::IndirectField:
69 case Decl::ObjCIvar:
70 case Decl::ObjCAtDefsField:
71 case Decl::ParmVar:
72 case Decl::ImplicitParam:
73 case Decl::ClassTemplate:
74 case Decl::VarTemplate:
75 case Decl::FunctionTemplate:
76 case Decl::TypeAliasTemplate:
77 case Decl::TemplateTemplateParm:
78 case Decl::ObjCMethod:
79 case Decl::ObjCCategory:
80 case Decl::ObjCProtocol:
81 case Decl::ObjCInterface:
82 case Decl::ObjCCategoryImpl:
83 case Decl::ObjCImplementation:
84 case Decl::ObjCProperty:
85 case Decl::ObjCCompatibleAlias:
86 case Decl::PragmaComment:
87 case Decl::PragmaDetectMismatch:
88 case Decl::AccessSpec:
89 case Decl::LinkageSpec:
90 case Decl::Export:
91 case Decl::ObjCPropertyImpl:
92 case Decl::FileScopeAsm:
93 case Decl::Friend:
94 case Decl::FriendTemplate:
95 case Decl::Block:
96 case Decl::Captured:
97 case Decl::ClassScopeFunctionSpecialization:
98 case Decl::UsingShadow:
99 case Decl::ConstructorUsingShadow:
100 case Decl::ObjCTypeParam:
101 case Decl::Binding:
102 llvm_unreachable("Declaration should not be in declstmts!");
103 case Decl::Function: // void X();
104 case Decl::Record: // struct/union/class X;
105 case Decl::Enum: // enum X;
106 case Decl::EnumConstant: // enum ? { X = ? }
107 case Decl::CXXRecord: // struct/union/class X; [C++]
108 case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
109 case Decl::Label: // __label__ x;
110 case Decl::Import:
111 case Decl::MSGuid: // __declspec(uuid("..."))
112 case Decl::OMPThreadPrivate:
113 case Decl::OMPAllocate:
114 case Decl::OMPCapturedExpr:
115 case Decl::OMPRequires:
116 case Decl::Empty:
117 case Decl::Concept:
118 case Decl::LifetimeExtendedTemporary:
119 case Decl::RequiresExprBody:
120 // None of these decls require codegen support.
121 return;
122
123 case Decl::NamespaceAlias:
124 if (CGDebugInfo *DI = getDebugInfo())
125 DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
126 return;
127 case Decl::Using: // using X; [C++]
128 if (CGDebugInfo *DI = getDebugInfo())
129 DI->EmitUsingDecl(cast<UsingDecl>(D));
130 return;
131 case Decl::UsingPack:
132 for (auto *Using : cast<UsingPackDecl>(D).expansions())
133 EmitDecl(*Using);
134 return;
135 case Decl::UsingDirective: // using namespace X; [C++]
136 if (CGDebugInfo *DI = getDebugInfo())
137 DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
138 return;
139 case Decl::Var:
140 case Decl::Decomposition: {
141 const VarDecl &VD = cast<VarDecl>(D);
142 assert(VD.isLocalVarDecl() &&
143 "Should not see file-scope variables inside a function!");
144 EmitVarDecl(VD);
145 if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
146 for (auto *B : DD->bindings())
147 if (auto *HD = B->getHoldingVar())
148 EmitVarDecl(*HD);
149 return;
150 }
151
152 case Decl::OMPDeclareReduction:
153 return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);
154
155 case Decl::OMPDeclareMapper:
156 return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);
157
158 case Decl::Typedef: // typedef int X;
159 case Decl::TypeAlias: { // using X = int; [C++0x]
160 const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
161 QualType Ty = TD.getUnderlyingType();
162
163 if (Ty->isVariablyModifiedType())
164 EmitVariablyModifiedType(Ty);
165
166 return;
167 }
168 }
169 }
170
171 /// EmitVarDecl - This method handles emission of any variable declaration
172 /// inside a function, including static vars etc.
173 void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
174 if (D.hasExternalStorage())
175 // Don't emit it now, allow it to be emitted lazily on its first use.
176 return;
177
178   // Some function-scope variables do not have static storage but still
179   // need to be emitted like static variables, e.g. a function-scope
180   // variable in the constant address space in OpenCL.
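  // For illustration only (hypothetical OpenCL source), such a variable could
  // look like:
  //   kernel void k() { __constant int tbl[2] = {1, 2}; }
  // tbl is written at function scope but does not have automatic storage
  // duration, so it is emitted through the static-variable path below.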
181 if (D.getStorageDuration() != SD_Automatic) {
182     // Static sampler variables are translated to function calls.
183 if (D.getType()->isSamplerT())
184 return;
185
186 llvm::GlobalValue::LinkageTypes Linkage =
187 CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);
188
189 // FIXME: We need to force the emission/use of a guard variable for
190 // some variables even if we can constant-evaluate them because
191 // we can't guarantee every translation unit will constant-evaluate them.
192
193 return EmitStaticVarDecl(D, Linkage);
194 }
195
196 if (D.getType().getAddressSpace() == LangAS::opencl_local)
197 return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
198
199 assert(D.hasLocalStorage());
200 return EmitAutoVarDecl(D);
201 }
202
203 static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
204 if (CGM.getLangOpts().CPlusPlus)
205 return CGM.getMangledName(&D).str();
206
207 // If this isn't C++, we don't need a mangled name, just a pretty one.
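  // Illustrative example: for `void foo(void) { static int x; }` in C, the
  // code below produces the name "foo.x".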
208 assert(!D.isExternallyVisible() && "name shouldn't matter");
209 std::string ContextName;
210 const DeclContext *DC = D.getDeclContext();
211 if (auto *CD = dyn_cast<CapturedDecl>(DC))
212 DC = cast<DeclContext>(CD->getNonClosureContext());
213 if (const auto *FD = dyn_cast<FunctionDecl>(DC))
214 ContextName = std::string(CGM.getMangledName(FD));
215 else if (const auto *BD = dyn_cast<BlockDecl>(DC))
216 ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
217 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
218 ContextName = OMD->getSelector().getAsString();
219 else
220 llvm_unreachable("Unknown context for static var decl");
221
222 ContextName += "." + D.getNameAsString();
223 return ContextName;
224 }
225
226 llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
227 const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
228 // In general, we don't always emit static var decls once before we reference
229 // them. It is possible to reference them before emitting the function that
230 // contains them, and it is possible to emit the containing function multiple
231 // times.
232 if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
233 return ExistingGV;
234
235 QualType Ty = D.getType();
236 assert(Ty->isConstantSizeType() && "VLAs can't be static");
237
238 // Use the label if the variable is renamed with the asm-label extension.
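  // Illustrative example: `static int x asm("ext_x");` carries an
  // AsmLabelAttr, so the global is simply named "ext_x".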
239 std::string Name;
240 if (D.hasAttr<AsmLabelAttr>())
241 Name = std::string(getMangledName(&D));
242 else
243 Name = getStaticDeclName(*this, D);
244
245 llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
246 LangAS AS = GetGlobalVarAddressSpace(&D);
247 unsigned TargetAS = getContext().getTargetAddressSpace(AS);
248
249 // OpenCL variables in local address space and CUDA shared
250 // variables cannot have an initializer.
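  // Illustrative example: `__local int buf[64];` (OpenCL) or
  // `__shared__ int buf[64];` (CUDA) gets an undef initializer here.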
251 llvm::Constant *Init = nullptr;
252 if (Ty.getAddressSpace() == LangAS::opencl_local ||
253 D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
254 Init = llvm::UndefValue::get(LTy);
255 else
256 Init = EmitNullConstant(Ty);
257
258 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
259 getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
260 nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
261 GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());
262
263 if (supportsCOMDAT() && GV->isWeakForLinker())
264 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
265
266 if (D.getTLSKind())
267 setTLSMode(GV, D);
268
269 setGVProperties(GV, &D);
270
271 // Make sure the result is of the correct type.
272 LangAS ExpectedAS = Ty.getAddressSpace();
273 llvm::Constant *Addr = GV;
274 if (AS != ExpectedAS) {
275 Addr = getTargetCodeGenInfo().performAddrSpaceCast(
276 *this, GV, AS, ExpectedAS,
277 LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
278 }
279
280 setStaticLocalDeclAddress(&D, Addr);
281
282 // Ensure that the static local gets initialized by making sure the parent
283 // function gets emitted eventually.
284 const Decl *DC = cast<Decl>(D.getDeclContext());
285
286 // We can't name blocks or captured statements directly, so try to emit their
287 // parents.
288 if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
289 DC = DC->getNonClosureContext();
290 // FIXME: Ensure that global blocks get emitted.
291 if (!DC)
292 return Addr;
293 }
294
295 GlobalDecl GD;
296 if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
297 GD = GlobalDecl(CD, Ctor_Base);
298 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
299 GD = GlobalDecl(DD, Dtor_Base);
300 else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
301 GD = GlobalDecl(FD);
302 else {
303 // Don't do anything for Obj-C method decls or global closures. We should
304 // never defer them.
305 assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
306 }
307 if (GD.getDecl()) {
308 // Disable emission of the parent function for the OpenMP device codegen.
309 CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
310 (void)GetAddrOfGlobal(GD);
311 }
312
313 return Addr;
314 }
315
316 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
317 /// global variable that has already been created for it. If the initializer
318 /// has a different type than GV does, this may free GV and return a different
319 /// one. Otherwise it just returns GV.
320 llvm::GlobalVariable *
321 CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
322 llvm::GlobalVariable *GV) {
323 ConstantEmitter emitter(*this);
324 llvm::Constant *Init = emitter.tryEmitForInitializer(D);
325
326 // If constant emission failed, then this should be a C++ static
327 // initializer.
328 if (!Init) {
329 if (!getLangOpts().CPlusPlus)
330 CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
331 else if (HaveInsertPoint()) {
332 // Since we have a static initializer, this global variable can't
333 // be constant.
334 GV->setConstant(false);
335
336 EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
337 }
338 return GV;
339 }
340
341 // The initializer may differ in type from the global. Rewrite
342 // the global to match the initializer. (We have to do this
343 // because some types, like unions, can't be completely represented
344 // in the LLVM type system.)
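  // Illustrative example: for `static union { int i; float f; } u = { .f = 1 };`
  // the global may have been created with an LLVM type based on a different
  // member than the one actually initialized, triggering the rewrite below.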
345 if (GV->getValueType() != Init->getType()) {
346 llvm::GlobalVariable *OldGV = GV;
347
348 GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
349 OldGV->isConstant(),
350 OldGV->getLinkage(), Init, "",
351 /*InsertBefore*/ OldGV,
352 OldGV->getThreadLocalMode(),
353 CGM.getContext().getTargetAddressSpace(D.getType()));
354 GV->setVisibility(OldGV->getVisibility());
355 GV->setDSOLocal(OldGV->isDSOLocal());
356 GV->setComdat(OldGV->getComdat());
357
358 // Steal the name of the old global
359 GV->takeName(OldGV);
360
361 // Replace all uses of the old global with the new global
362 llvm::Constant *NewPtrForOldDecl =
363 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
364 OldGV->replaceAllUsesWith(NewPtrForOldDecl);
365
366 // Erase the old global, since it is no longer used.
367 OldGV->eraseFromParent();
368 }
369
370 GV->setConstant(CGM.isTypeConstant(D.getType(), true));
371 GV->setInitializer(Init);
372
373 emitter.finalize(GV);
374
375 if (D.needsDestruction(getContext()) == QualType::DK_cxx_destructor &&
376 HaveInsertPoint()) {
377 // We have a constant initializer, but a nontrivial destructor. We still
378 // need to perform a guarded "initialization" in order to register the
379 // destructor.
380 EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
381 }
382
383 return GV;
384 }
385
386 void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
387 llvm::GlobalValue::LinkageTypes Linkage) {
388 // Check to see if we already have a global variable for this
389 // declaration. This can happen when double-emitting function
390 // bodies, e.g. with complete and base constructors.
391 llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
392 CharUnits alignment = getContext().getDeclAlign(&D);
393
394 // Store into LocalDeclMap before generating initializer to handle
395 // circular references.
396 setAddrOfLocalVar(&D, Address(addr, alignment));
397
398 // We can't have a VLA here, but we can have a pointer to a VLA,
399 // even though that doesn't really make any sense.
400 // Make sure to evaluate VLA bounds now so that we have them for later.
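  // Illustrative example: `static int (*p)[n];` is not a VLA itself, but its
  // type mentions `n`, so that bound is evaluated here.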
401 if (D.getType()->isVariablyModifiedType())
402 EmitVariablyModifiedType(D.getType());
403
404 // Save the type in case adding the initializer forces a type change.
405 llvm::Type *expectedType = addr->getType();
406
407 llvm::GlobalVariable *var =
408 cast<llvm::GlobalVariable>(addr->stripPointerCasts());
409
410 // CUDA's local and local static __shared__ variables should not
411 // have any non-empty initializers. This is ensured by Sema.
412 // Whatever initializer such variable may have when it gets here is
413 // a no-op and should not be emitted.
414 bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
415 D.hasAttr<CUDASharedAttr>();
416 // If this value has an initializer, emit it.
417 if (D.getInit() && !isCudaSharedVar)
418 var = AddInitializerToStaticVarDecl(D, var);
419
420 var->setAlignment(alignment.getAsAlign());
421
422 if (D.hasAttr<AnnotateAttr>())
423 CGM.AddGlobalAnnotations(&D, var);
424
425 if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
426 var->addAttribute("bss-section", SA->getName());
427 if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
428 var->addAttribute("data-section", SA->getName());
429 if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
430 var->addAttribute("rodata-section", SA->getName());
431 if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
432 var->addAttribute("relro-section", SA->getName());
433
434 if (const SectionAttr *SA = D.getAttr<SectionAttr>())
435 var->setSection(SA->getName());
436
437 if (D.hasAttr<UsedAttr>())
438 CGM.addUsedGlobal(var);
439
440 // We may have to cast the constant because of the initializer
441 // mismatch above.
442 //
443 // FIXME: It is really dangerous to store this in the map; if anyone
444 // RAUW's the GV uses of this constant will be invalid.
445 llvm::Constant *castedAddr =
446 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
447 if (var != castedAddr)
448 LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
449 CGM.setStaticLocalDeclAddress(&D, castedAddr);
450
451 CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
452
453 // Emit global variable debug descriptor for static vars.
454 CGDebugInfo *DI = getDebugInfo();
455 if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
456 DI->setLocation(D.getLocation());
457 DI->EmitGlobalVariable(var, &D);
458 }
459 }
460
461 namespace {
462 struct DestroyObject final : EHScopeStack::Cleanup {
463 DestroyObject(Address addr, QualType type,
464 CodeGenFunction::Destroyer *destroyer,
465 bool useEHCleanupForArray)
466 : addr(addr), type(type), destroyer(destroyer),
467 useEHCleanupForArray(useEHCleanupForArray) {}
468
469 Address addr;
470 QualType type;
471 CodeGenFunction::Destroyer *destroyer;
472 bool useEHCleanupForArray;
473
474 void Emit(CodeGenFunction &CGF, Flags flags) override {
475 // Don't use an EH cleanup recursively from an EH cleanup.
476 bool useEHCleanupForArray =
477 flags.isForNormalCleanup() && this->useEHCleanupForArray;
478
479 CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
480 }
481 };
482
483 template <class Derived>
484 struct DestroyNRVOVariable : EHScopeStack::Cleanup {
485 DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
486 : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
487
488 llvm::Value *NRVOFlag;
489 Address Loc;
490 QualType Ty;
491
492 void Emit(CodeGenFunction &CGF, Flags flags) override {
493 // Along the exceptions path we always execute the dtor.
494 bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
495
496 llvm::BasicBlock *SkipDtorBB = nullptr;
497 if (NRVO) {
498 // If we exited via NRVO, we skip the destructor call.
499 llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
500 SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
501 llvm::Value *DidNRVO =
502 CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
503 CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
504 CGF.EmitBlock(RunDtorBB);
505 }
506
507 static_cast<Derived *>(this)->emitDestructorCall(CGF);
508
509 if (NRVO) CGF.EmitBlock(SkipDtorBB);
510 }
511
512 virtual ~DestroyNRVOVariable() = default;
513 };
514
515 struct DestroyNRVOVariableCXX final
516 : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
517 DestroyNRVOVariableCXX(Address addr, QualType type,
518 const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
519 : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
520 Dtor(Dtor) {}
521
522 const CXXDestructorDecl *Dtor;
523
524 void emitDestructorCall(CodeGenFunction &CGF) {
525 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
526 /*ForVirtualBase=*/false,
527 /*Delegating=*/false, Loc, Ty);
528 }
529 };
530
531 struct DestroyNRVOVariableC final
532 : DestroyNRVOVariable<DestroyNRVOVariableC> {
533 DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
534 : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}
535
536 void emitDestructorCall(CodeGenFunction &CGF) {
537 CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
538 }
539 };
540
541 struct CallStackRestore final : EHScopeStack::Cleanup {
542 Address Stack;
543 CallStackRestore(Address Stack) : Stack(Stack) {}
544 void Emit(CodeGenFunction &CGF, Flags flags) override {
545 llvm::Value *V = CGF.Builder.CreateLoad(Stack);
546 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
547 CGF.Builder.CreateCall(F, V);
548 }
549 };
550
551 struct ExtendGCLifetime final : EHScopeStack::Cleanup {
552 const VarDecl &Var;
553 ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
554
555 void Emit(CodeGenFunction &CGF, Flags flags) override {
556 // Compute the address of the local variable, in case it's a
557 // byref or something.
558 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
559 Var.getType(), VK_LValue, SourceLocation());
560 llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
561 SourceLocation());
562 CGF.EmitExtendGCLifetime(value);
563 }
564 };
565
566 struct CallCleanupFunction final : EHScopeStack::Cleanup {
567 llvm::Constant *CleanupFn;
568 const CGFunctionInfo &FnInfo;
569 const VarDecl &Var;
570
571 CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
572 const VarDecl *Var)
573 : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
574
575 void Emit(CodeGenFunction &CGF, Flags flags) override {
576 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
577 Var.getType(), VK_LValue, SourceLocation());
578 // Compute the address of the local variable, in case it's a byref
579 // or something.
580 llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);
581
582 // In some cases, the type of the function argument will be different from
583 // the type of the pointer. An example of this is
584 // void f(void* arg);
585 // __attribute__((cleanup(f))) void *g;
586 //
587 // To fix this we insert a bitcast here.
588 QualType ArgTy = FnInfo.arg_begin()->type;
589 llvm::Value *Arg =
590 CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
591
592 CallArgList Args;
593 Args.add(RValue::get(Arg),
594 CGF.getContext().getPointerType(Var.getType()));
595 auto Callee = CGCallee::forDirect(CleanupFn);
596 CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
597 }
598 };
599 } // end anonymous namespace
600
601 /// EmitAutoVarWithLifetime - Does the setup required for an automatic
602 /// variable with lifetime.
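/// Illustrative examples: a local `__strong id x` gets a cleanup that releases
/// it on scope exit, while a local `__weak id w` gets an EH-enabled cleanup
/// that calls objc_destroyWeak.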
603 static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
604 Address addr,
605 Qualifiers::ObjCLifetime lifetime) {
606 switch (lifetime) {
607 case Qualifiers::OCL_None:
608 llvm_unreachable("present but none");
609
610 case Qualifiers::OCL_ExplicitNone:
611 // nothing to do
612 break;
613
614 case Qualifiers::OCL_Strong: {
615 CodeGenFunction::Destroyer *destroyer =
616 (var.hasAttr<ObjCPreciseLifetimeAttr>()
617 ? CodeGenFunction::destroyARCStrongPrecise
618 : CodeGenFunction::destroyARCStrongImprecise);
619
620 CleanupKind cleanupKind = CGF.getARCCleanupKind();
621 CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
622 cleanupKind & EHCleanup);
623 break;
624 }
625 case Qualifiers::OCL_Autoreleasing:
626 // nothing to do
627 break;
628
629 case Qualifiers::OCL_Weak:
630 // __weak objects always get EH cleanups; otherwise, exceptions
631 // could cause really nasty crashes instead of mere leaks.
632 CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
633 CodeGenFunction::destroyARCWeak,
634 /*useEHCleanup*/ true);
635 break;
636 }
637 }
638
639 static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
640 if (const Expr *e = dyn_cast<Expr>(s)) {
641 // Skip the most common kinds of expressions that make
642 // hierarchy-walking expensive.
643 s = e = e->IgnoreParenCasts();
644
645 if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
646 return (ref->getDecl() == &var);
647 if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
648 const BlockDecl *block = be->getBlockDecl();
649 for (const auto &I : block->captures()) {
650 if (I.getVariable() == &var)
651 return true;
652 }
653 }
654 }
655
656 for (const Stmt *SubStmt : s->children())
657 // SubStmt might be null; as in missing decl or conditional of an if-stmt.
658 if (SubStmt && isAccessedBy(var, SubStmt))
659 return true;
660
661 return false;
662 }
663
664 static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
665 if (!decl) return false;
666 if (!isa<VarDecl>(decl)) return false;
667 const VarDecl *var = cast<VarDecl>(decl);
668 return isAccessedBy(*var, e);
669 }
670
671 static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
672 const LValue &destLV, const Expr *init) {
673 bool needsCast = false;
674
675 while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
676 switch (castExpr->getCastKind()) {
677 // Look through casts that don't require representation changes.
678 case CK_NoOp:
679 case CK_BitCast:
680 case CK_BlockPointerToObjCPointerCast:
681 needsCast = true;
682 break;
683
684 // If we find an l-value to r-value cast from a __weak variable,
685 // emit this operation as a copy or move.
686 case CK_LValueToRValue: {
687 const Expr *srcExpr = castExpr->getSubExpr();
688 if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
689 return false;
690
691 // Emit the source l-value.
692 LValue srcLV = CGF.EmitLValue(srcExpr);
693
694 // Handle a formal type change to avoid asserting.
695 auto srcAddr = srcLV.getAddress(CGF);
696 if (needsCast) {
697 srcAddr = CGF.Builder.CreateElementBitCast(
698 srcAddr, destLV.getAddress(CGF).getElementType());
699 }
700
701 // If it was an l-value, use objc_copyWeak.
702 if (srcExpr->getValueKind() == VK_LValue) {
703 CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
704 } else {
705 assert(srcExpr->getValueKind() == VK_XValue);
706 CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
707 }
708 return true;
709 }
710
711 // Stop at anything else.
712 default:
713 return false;
714 }
715
716 init = castExpr->getSubExpr();
717 }
718 return false;
719 }
720
721 static void drillIntoBlockVariable(CodeGenFunction &CGF,
722 LValue &lvalue,
723 const VarDecl *var) {
724 lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
725 }
726
727 void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
728 SourceLocation Loc) {
729 if (!SanOpts.has(SanitizerKind::NullabilityAssign))
730 return;
731
732 auto Nullability = LHS.getType()->getNullability(getContext());
733 if (!Nullability || *Nullability != NullabilityKind::NonNull)
734 return;
735
736   // If the left hand side must be nonnull, check that the right hand side of
737   // the assignment is in fact nonnull.
738 SanitizerScope SanScope(this);
739 llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
740 llvm::Constant *StaticData[] = {
741 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
742 llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
743 llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
744 EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
745 SanitizerHandler::TypeMismatch, StaticData, RHS);
746 }
747
748 void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
749 LValue lvalue, bool capturedByInit) {
750 Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
751 if (!lifetime) {
752 llvm::Value *value = EmitScalarExpr(init);
753 if (capturedByInit)
754 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
755 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
756 EmitStoreThroughLValue(RValue::get(value), lvalue, true);
757 return;
758 }
759
760 if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
761 init = DIE->getExpr();
762
763 // If we're emitting a value with lifetime, we have to do the
764 // initialization *before* we leave the cleanup scopes.
765 if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(init))
766 init = EWC->getSubExpr();
767 CodeGenFunction::RunCleanupsScope Scope(*this);
768
769 // We have to maintain the illusion that the variable is
770 // zero-initialized. If the variable might be accessed in its
771 // initializer, zero-initialize before running the initializer, then
772 // actually perform the initialization with an assign.
773 bool accessedByInit = false;
774 if (lifetime != Qualifiers::OCL_ExplicitNone)
775 accessedByInit = (capturedByInit || isAccessedBy(D, init));
776 if (accessedByInit) {
777 LValue tempLV = lvalue;
778 // Drill down to the __block object if necessary.
779 if (capturedByInit) {
780 // We can use a simple GEP for this because it can't have been
781 // moved yet.
782 tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
783 cast<VarDecl>(D),
784 /*follow*/ false));
785 }
786
787 auto ty =
788 cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
789 llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
790
791 // If __weak, we want to use a barrier under certain conditions.
792 if (lifetime == Qualifiers::OCL_Weak)
793 EmitARCInitWeak(tempLV.getAddress(*this), zero);
794
795 // Otherwise just do a simple store.
796 else
797 EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
798 }
799
800 // Emit the initializer.
801 llvm::Value *value = nullptr;
802
803 switch (lifetime) {
804 case Qualifiers::OCL_None:
805 llvm_unreachable("present but none");
806
807 case Qualifiers::OCL_Strong: {
808 if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
809 value = EmitARCRetainScalarExpr(init);
810 break;
811 }
812     // If D is pseudo-strong, treat it like __unsafe_unretained here. This means
813     // that we omit the retain, which causes non-autoreleased return values to be
814     // released immediately.
815 LLVM_FALLTHROUGH;
816 }
817
818 case Qualifiers::OCL_ExplicitNone:
819 value = EmitARCUnsafeUnretainedScalarExpr(init);
820 break;
821
822 case Qualifiers::OCL_Weak: {
823 // If it's not accessed by the initializer, try to emit the
824 // initialization with a copy or move.
825 if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
826 return;
827 }
828
829 // No way to optimize a producing initializer into this. It's not
830 // worth optimizing for, because the value will immediately
831 // disappear in the common case.
832 value = EmitScalarExpr(init);
833
834 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
835 if (accessedByInit)
836 EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
837 else
838 EmitARCInitWeak(lvalue.getAddress(*this), value);
839 return;
840 }
841
842 case Qualifiers::OCL_Autoreleasing:
843 value = EmitARCRetainAutoreleaseScalarExpr(init);
844 break;
845 }
846
847 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
848
849 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
850
851 // If the variable might have been accessed by its initializer, we
852 // might have to initialize with a barrier. We have to do this for
853 // both __weak and __strong, but __weak got filtered out above.
854 if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
855 llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
856 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
857 EmitARCRelease(oldValue, ARCImpreciseLifetime);
858 return;
859 }
860
861 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
862 }
863
864 /// Decide whether we can emit the non-zero parts of the specified initializer
865 /// with equal or fewer than NumStores scalar stores.
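/// Illustrative example: `int a[128] = {0, 0, 7};` is much larger than 32
/// bytes but has a single non-zero element, so it can be lowered as one bzero
/// plus one scalar store.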
866 static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
867 unsigned &NumStores) {
868   // Zero and Undef never require any extra stores.
869 if (isa<llvm::ConstantAggregateZero>(Init) ||
870 isa<llvm::ConstantPointerNull>(Init) ||
871 isa<llvm::UndefValue>(Init))
872 return true;
873 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
874 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
875 isa<llvm::ConstantExpr>(Init))
876 return Init->isNullValue() || NumStores--;
877
878 // See if we can emit each element.
879 if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
880 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
881 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
882 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
883 return false;
884 }
885 return true;
886 }
887
888 if (llvm::ConstantDataSequential *CDS =
889 dyn_cast<llvm::ConstantDataSequential>(Init)) {
890 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
891 llvm::Constant *Elt = CDS->getElementAsConstant(i);
892 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
893 return false;
894 }
895 return true;
896 }
897
898 // Anything else is hard and scary.
899 return false;
900 }
901
902 /// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
903 /// the scalar stores that would be required.
904 static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
905 llvm::Constant *Init, Address Loc,
906 bool isVolatile, CGBuilderTy &Builder) {
907 assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
908 "called emitStoresForInitAfterBZero for zero or undef value.");
909
910 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
911 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
912 isa<llvm::ConstantExpr>(Init)) {
913 Builder.CreateStore(Init, Loc, isVolatile);
914 return;
915 }
916
917 if (llvm::ConstantDataSequential *CDS =
918 dyn_cast<llvm::ConstantDataSequential>(Init)) {
919 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
920 llvm::Constant *Elt = CDS->getElementAsConstant(i);
921
922 // If necessary, get a pointer to the element and emit it.
923 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
924 emitStoresForInitAfterBZero(
925 CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
926 Builder);
927 }
928 return;
929 }
930
931 assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
932 "Unknown value type!");
933
934 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
935 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
936
937 // If necessary, get a pointer to the element and emit it.
938 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
939 emitStoresForInitAfterBZero(CGM, Elt,
940 Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
941 isVolatile, Builder);
942 }
943 }
944
945 /// Decide whether we should use bzero plus some stores to initialize a local
946 /// variable instead of using a memcpy from a constant global. It is beneficial
947 /// to use bzero if the global is all zeros, or mostly zeros and large.
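/// Illustrative example: a 512-byte array with only a couple of non-zero
/// elements is cheaper to bzero and patch up with a few stores than to memcpy
/// from an equally large constant global.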
948 static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
949 uint64_t GlobalSize) {
950 // If a global is all zeros, always use a bzero.
951 if (isa<llvm::ConstantAggregateZero>(Init)) return true;
952
953 // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
954 // do it if it will require 6 or fewer scalar stores.
955   // TODO: Should the budget depend on the size? Avoiding a large global
956   // warrants plopping in more stores.
957 unsigned StoreBudget = 6;
958 uint64_t SizeLimit = 32;
959
960 return GlobalSize > SizeLimit &&
961 canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
962 }
963
964 /// Decide whether we should use memset to initialize a local variable instead
965 /// of using a memcpy from a constant global. Assumes we've already decided to
966 /// not use bzero.
967 /// FIXME We could be more clever, as we are for bzero above, and generate
968 /// memset followed by stores. It's unclear that's worth the effort.
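/// Illustrative example: a 64-byte array whose initializer repeats the byte
/// 0xAA is recognized by isBytewiseValue and can be lowered to a single
/// memset of 0xAA.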
969 static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
970 uint64_t GlobalSize,
971 const llvm::DataLayout &DL) {
972 uint64_t SizeLimit = 32;
973 if (GlobalSize <= SizeLimit)
974 return nullptr;
975 return llvm::isBytewiseValue(Init, DL);
976 }
977
978 /// Decide whether we want to split a constant structure or array store into a
979 /// sequence of its fields' stores. This may cost us code size and compilation
980 /// speed, but plays better with store optimizations.
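/// Illustrative example: when optimizing, a 16-byte constant struct
/// initializer is emitted as individual field stores instead of a memcpy from
/// a global, which later store/load optimizations handle better.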
981 static bool shouldSplitConstantStore(CodeGenModule &CGM,
982 uint64_t GlobalByteSize) {
983 // Don't break things that occupy more than one cacheline.
984 uint64_t ByteSizeLimit = 64;
985 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
986 return false;
987 if (GlobalByteSize <= ByteSizeLimit)
988 return true;
989 return false;
990 }
991
992 enum class IsPattern { No, Yes };
993
994 /// Generate a constant filled with either a pattern or zeroes.
995 static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
996 llvm::Type *Ty) {
997 if (isPattern == IsPattern::Yes)
998 return initializationPatternFor(CGM, Ty);
999 else
1000 return llvm::Constant::getNullValue(Ty);
1001 }
1002
1003 static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1004 llvm::Constant *constant);
1005
1006 /// Helper function for constWithPadding() to deal with padding in structures.
1007 static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
1008 IsPattern isPattern,
1009 llvm::StructType *STy,
1010 llvm::Constant *constant) {
1011 const llvm::DataLayout &DL = CGM.getDataLayout();
1012 const llvm::StructLayout *Layout = DL.getStructLayout(STy);
1013 llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
1014 unsigned SizeSoFar = 0;
1015 SmallVector<llvm::Constant *, 8> Values;
1016 bool NestedIntact = true;
1017 for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1018 unsigned CurOff = Layout->getElementOffset(i);
1019 if (SizeSoFar < CurOff) {
1020 assert(!STy->isPacked());
1021 auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
1022 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1023 }
1024 llvm::Constant *CurOp;
1025 if (constant->isZeroValue())
1026 CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
1027 else
1028 CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
1029 auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
1030 if (CurOp != NewOp)
1031 NestedIntact = false;
1032 Values.push_back(NewOp);
1033 SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
1034 }
1035 unsigned TotalSize = Layout->getSizeInBytes();
1036 if (SizeSoFar < TotalSize) {
1037 auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
1038 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1039 }
1040 if (NestedIntact && Values.size() == STy->getNumElements())
1041 return constant;
1042 return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
1043 }
1044
1045 /// Replace all padding bytes in a given constant with either a pattern byte or
1046 /// 0x00.
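/// Illustrative example: for `struct { char c; int i; }` on a typical target,
/// the three padding bytes after `c` become 0x00 under zero initialization or
/// pattern bytes under pattern initialization.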
1047 static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1048 llvm::Constant *constant) {
1049 llvm::Type *OrigTy = constant->getType();
1050 if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
1051 return constStructWithPadding(CGM, isPattern, STy, constant);
1052 if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
1053 llvm::SmallVector<llvm::Constant *, 8> Values;
1054 uint64_t Size = ArrayTy->getNumElements();
1055 if (!Size)
1056 return constant;
1057 llvm::Type *ElemTy = ArrayTy->getElementType();
1058 bool ZeroInitializer = constant->isNullValue();
1059 llvm::Constant *OpValue, *PaddedOp;
1060 if (ZeroInitializer) {
1061 OpValue = llvm::Constant::getNullValue(ElemTy);
1062 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1063 }
1064 for (unsigned Op = 0; Op != Size; ++Op) {
1065 if (!ZeroInitializer) {
1066 OpValue = constant->getAggregateElement(Op);
1067 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1068 }
1069 Values.push_back(PaddedOp);
1070 }
1071 auto *NewElemTy = Values[0]->getType();
1072 if (NewElemTy == ElemTy)
1073 return constant;
1074 auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
1075 return llvm::ConstantArray::get(NewArrayTy, Values);
1076 }
1077 // FIXME: Add handling for tail padding in vectors. Vectors don't
1078 // have padding between or inside elements, but the total amount of
1079 // data can be less than the allocated size.
1080 return constant;
1081 }
1082
1083 Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
1084 llvm::Constant *Constant,
1085 CharUnits Align) {
1086 auto FunctionName = [&](const DeclContext *DC) -> std::string {
1087 if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
1088 if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
1089 return CC->getNameAsString();
1090 if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
1091 return CD->getNameAsString();
1092 return std::string(getMangledName(FD));
1093 } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
1094 return OM->getNameAsString();
1095 } else if (isa<BlockDecl>(DC)) {
1096 return "<block>";
1097 } else if (isa<CapturedDecl>(DC)) {
1098 return "<captured>";
1099 } else {
1100 llvm_unreachable("expected a function or method");
1101 }
1102 };
1103
1104 // Form a simple per-variable cache of these values in case we find we
1105 // want to reuse them.
1106 llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
1107 if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
1108 auto *Ty = Constant->getType();
1109 bool isConstant = true;
1110 llvm::GlobalVariable *InsertBefore = nullptr;
1111 unsigned AS =
1112 getContext().getTargetAddressSpace(getStringLiteralAddressSpace());
1113 std::string Name;
1114 if (D.hasGlobalStorage())
1115 Name = getMangledName(&D).str() + ".const";
1116 else if (const DeclContext *DC = D.getParentFunctionOrMethod())
1117 Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
1118 else
1119 llvm_unreachable("local variable has no parent function or method");
1120 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
1121 getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
1122 Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
1123 GV->setAlignment(Align.getAsAlign());
1124 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1125 CacheEntry = GV;
1126 } else if (CacheEntry->getAlignment() < Align.getQuantity()) {
1127 CacheEntry->setAlignment(Align.getAsAlign());
1128 }
1129
1130 return Address(CacheEntry, Align);
1131 }
1132
1133 static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
1134 const VarDecl &D,
1135 CGBuilderTy &Builder,
1136 llvm::Constant *Constant,
1137 CharUnits Align) {
1138 Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
1139 llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
1140 SrcPtr.getAddressSpace());
1141 if (SrcPtr.getType() != BP)
1142 SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
1143 return SrcPtr;
1144 }
1145
1146 static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
1147 Address Loc, bool isVolatile,
1148 CGBuilderTy &Builder,
1149 llvm::Constant *constant) {
1150 auto *Ty = constant->getType();
1151 uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
1152 if (!ConstantSize)
1153 return;
1154
1155 bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
1156 Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
1157 if (canDoSingleStore) {
1158 Builder.CreateStore(constant, Loc, isVolatile);
1159 return;
1160 }
1161
1162 auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
1163
1164 // If the initializer is all or mostly the same, codegen with bzero / memset
1165 // then do a few stores afterward.
1166 if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
1167 Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0), SizeVal,
1168 isVolatile);
1169
1170 bool valueAlreadyCorrect =
1171 constant->isNullValue() || isa<llvm::UndefValue>(constant);
1172 if (!valueAlreadyCorrect) {
1173 Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
1174 emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
1175 }
1176 return;
1177 }
1178
1179 // If the initializer is a repeated byte pattern, use memset.
1180 llvm::Value *Pattern =
1181 shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
1182 if (Pattern) {
1183 uint64_t Value = 0x00;
1184 if (!isa<llvm::UndefValue>(Pattern)) {
1185 const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
1186 assert(AP.getBitWidth() <= 8);
1187 Value = AP.getLimitedValue();
1188 }
1189 Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal,
1190 isVolatile);
1191 return;
1192 }
1193
1194 // If the initializer is small, use a handful of stores.
1195 if (shouldSplitConstantStore(CGM, ConstantSize)) {
1196 if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
1197 // FIXME: handle the case when STy != Loc.getElementType().
1198 if (STy == Loc.getElementType()) {
1199 for (unsigned i = 0; i != constant->getNumOperands(); i++) {
1200 Address EltPtr = Builder.CreateStructGEP(Loc, i);
1201 emitStoresForConstant(
1202 CGM, D, EltPtr, isVolatile, Builder,
1203 cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
1204 }
1205 return;
1206 }
1207 } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
1208 // FIXME: handle the case when ATy != Loc.getElementType().
1209 if (ATy == Loc.getElementType()) {
1210 for (unsigned i = 0; i != ATy->getNumElements(); i++) {
1211 Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
1212 emitStoresForConstant(
1213 CGM, D, EltPtr, isVolatile, Builder,
1214 cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
1215 }
1216 return;
1217 }
1218 }
1219 }
1220
1221 // Copy from a global.
1222 Builder.CreateMemCpy(Loc,
1223 createUnnamedGlobalForMemcpyFrom(
1224 CGM, D, Builder, constant, Loc.getAlignment()),
1225 SizeVal, isVolatile);
1226 }
1227
1228 static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
1229 Address Loc, bool isVolatile,
1230 CGBuilderTy &Builder) {
1231 llvm::Type *ElTy = Loc.getElementType();
1232 llvm::Constant *constant =
1233 constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
1234 emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
1235 }
1236
1237 static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
1238 Address Loc, bool isVolatile,
1239 CGBuilderTy &Builder) {
1240 llvm::Type *ElTy = Loc.getElementType();
1241 llvm::Constant *constant = constWithPadding(
1242 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1243 assert(!isa<llvm::UndefValue>(constant));
1244 emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
1245 }
1246
1247 static bool containsUndef(llvm::Constant *constant) {
1248 auto *Ty = constant->getType();
1249 if (isa<llvm::UndefValue>(constant))
1250 return true;
1251 if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
1252 for (llvm::Use &Op : constant->operands())
1253 if (containsUndef(cast<llvm::Constant>(Op)))
1254 return true;
1255 return false;
1256 }
1257
1258 static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
1259 llvm::Constant *constant) {
1260 auto *Ty = constant->getType();
1261 if (isa<llvm::UndefValue>(constant))
1262 return patternOrZeroFor(CGM, isPattern, Ty);
1263 if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
1264 return constant;
1265 if (!containsUndef(constant))
1266 return constant;
1267 llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
1268 for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
1269 auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
1270 Values[Op] = replaceUndef(CGM, isPattern, OpValue);
1271 }
1272 if (Ty->isStructTy())
1273 return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
1274 if (Ty->isArrayTy())
1275 return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
1276 assert(Ty->isVectorTy());
1277 return llvm::ConstantVector::get(Values);
1278 }
1279
1280 /// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
1281 /// variable declaration with auto, register, or no storage class specifier.
1282 /// These turn into simple stack objects, or GlobalValues depending on target.
1283 void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
1284 AutoVarEmission emission = EmitAutoVarAlloca(D);
1285 EmitAutoVarInit(emission);
1286 EmitAutoVarCleanups(emission);
1287 }
1288
1289 /// Emit a lifetime.begin marker if some criteria are satisfied.
1290 /// \return a pointer to the temporary size Value if a marker was emitted, null
1291 /// otherwise
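/// When a marker is emitted, the resulting IR looks like (illustrative):
///   %0 = bitcast i32* %x to i8*
///   call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)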
1292 llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
1293 llvm::Value *Addr) {
1294 if (!ShouldEmitLifetimeMarkers)
1295 return nullptr;
1296
1297 assert(Addr->getType()->getPointerAddressSpace() ==
1298 CGM.getDataLayout().getAllocaAddrSpace() &&
1299 "Pointer should be in alloca address space");
1300 llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
1301 Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
1302 llvm::CallInst *C =
1303 Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
1304 C->setDoesNotThrow();
1305 return SizeV;
1306 }
1307
1308 void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
1309 assert(Addr->getType()->getPointerAddressSpace() ==
1310 CGM.getDataLayout().getAllocaAddrSpace() &&
1311 "Pointer should be in alloca address space");
1312 Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
1313 llvm::CallInst *C =
1314 Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
1315 C->setDoesNotThrow();
1316 }
1317
1318 void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1319 CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1320 // For each dimension stores its QualType and corresponding
1321 // size-expression Value.
1322 SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1323 SmallVector<IdentifierInfo *, 4> VLAExprNames;
1324
1325 // Break down the array into individual dimensions.
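  // Illustrative example: for `int a[n][m]`, two dimensions are recorded; the
  // non-constant sizes are spilled to allocas named __vla_expr0 and
  // __vla_expr1 below.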
1326 QualType Type1D = D.getType();
1327 while (getContext().getAsVariableArrayType(Type1D)) {
1328 auto VlaSize = getVLAElements1D(Type1D);
1329 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1330 Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1331 else {
1332 // Generate a locally unique name for the size expression.
1333 Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1334 SmallString<12> Buffer;
1335 StringRef NameRef = Name.toStringRef(Buffer);
1336 auto &Ident = getContext().Idents.getOwn(NameRef);
1337 VLAExprNames.push_back(&Ident);
1338 auto SizeExprAddr =
1339 CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1340 Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1341 Dimensions.emplace_back(SizeExprAddr.getPointer(),
1342 Type1D.getUnqualifiedType());
1343 }
1344 Type1D = VlaSize.Type;
1345 }
1346
1347 if (!EmitDebugInfo)
1348 return;
1349
1350 // Register each dimension's size-expression with a DILocalVariable,
1351 // so that it can be used by CGDebugInfo when instantiating a DISubrange
1352 // to describe this array.
1353 unsigned NameIdx = 0;
1354 for (auto &VlaSize : Dimensions) {
1355 llvm::Metadata *MD;
1356 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1357 MD = llvm::ConstantAsMetadata::get(C);
1358 else {
1359 // Create an artificial VarDecl to generate debug info for.
1360 IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1361 auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
1362 auto QT = getContext().getIntTypeForBitwidth(
1363 VlaExprTy->getScalarSizeInBits(), false);
1364 auto *ArtificialDecl = VarDecl::Create(
1365 getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1366 D.getLocation(), D.getLocation(), NameIdent, QT,
1367 getContext().CreateTypeSourceInfo(QT), SC_Auto);
1368 ArtificialDecl->setImplicit();
1369
1370 MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1371 Builder);
1372 }
1373 assert(MD && "No Size expression debug node created");
1374 DI->registerVLASizeExpression(VlaSize.Type, MD);
1375 }
1376 }
1377
1378 /// EmitAutoVarAlloca - Emit the alloca and debug information for a
1379 /// local variable. Does not emit initialization or destruction.
1380 CodeGenFunction::AutoVarEmission
1381 CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1382 QualType Ty = D.getType();
1383 assert(
1384 Ty.getAddressSpace() == LangAS::Default ||
1385 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1386
1387 AutoVarEmission emission(D);
1388
1389 bool isEscapingByRef = D.isEscapingByref();
1390 emission.IsEscapingByRef = isEscapingByRef;
1391
1392 CharUnits alignment = getContext().getDeclAlign(&D);
1393
1394 // If the type is variably-modified, emit all the VLA sizes for it.
1395 if (Ty->isVariablyModifiedType())
1396 EmitVariablyModifiedType(Ty);
1397
1398 auto *DI = getDebugInfo();
1399 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1400
1401 Address address = Address::invalid();
1402 Address AllocaAddr = Address::invalid();
1403 Address OpenMPLocalAddr = Address::invalid();
1404 if (CGM.getLangOpts().OpenMPIRBuilder)
1405 OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
1406 else
1407 OpenMPLocalAddr =
1408 getLangOpts().OpenMP
1409 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1410 : Address::invalid();
1411
1412 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1413
1414 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1415 address = OpenMPLocalAddr;
1416 } else if (Ty->isConstantSizeType()) {
1417 // If this value is an array or struct with a statically determinable
1418 // constant initializer, there are optimizations we can do.
1419 //
1420 // TODO: We should constant-evaluate the initializer of any variable,
1421 // as long as it is initialized by a constant expression. Currently,
1422 // isConstantInitializer produces wrong answers for structs with
1423 // reference or bitfield members, and a few other cases, and checking
1424 // for POD-ness protects us from some of these.
1425 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1426 (D.isConstexpr() ||
1427 ((Ty.isPODType(getContext()) ||
1428 getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1429 D.getInit()->isConstantInitializer(getContext(), false)))) {
1430
1431 // If the variable's a const type, and it's neither an NRVO
1432 // candidate nor a __block variable and has no mutable members,
1433 // emit it as a global instead.
1434 // Exception is if a variable is located in non-constant address space
1435 // in OpenCL.
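// Illustrative example (editor's sketch, assuming -fmerge-all-constants): a
// local like
//
//   void use(const int *);
//   void f() {
//     const int table[4] = {1, 2, 3, 4}; // const, non-NRVO, not __block
//     use(table);
//   }
//
// can be emitted as an internal-linkage global constant instead of an alloca
// that is re-initialized on every call.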
1436 if ((!getLangOpts().OpenCL ||
1437 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1438 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1439 !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
1440 EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1441
1442 // Signal this condition to later callbacks.
1443 emission.Addr = Address::invalid();
1444 assert(emission.wasEmittedAsGlobal());
1445 return emission;
1446 }
1447
1448 // Otherwise, tell the initialization code that we're in this case.
1449 emission.IsConstantAggregate = true;
1450 }
1451
1452 // A normal fixed sized variable becomes an alloca in the entry block,
1453 // unless:
1454 // - it's an NRVO variable.
1455 // - we are compiling OpenMP and it's an OpenMP local variable.
1456 if (NRVO) {
1457 // The named return value optimization: allocate this variable in the
1458 // return slot, so that we can elide the copy when returning this
1459 // variable (C++0x [class.copy]p34).
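// Illustrative example (editor's note):
//
//   struct Widget { Widget(); Widget(const Widget &); ~Widget(); };
//   Widget make() {
//     Widget w;   // NRVO candidate: constructed directly in the return slot
//     return w;   // no copy/move constructor call is emitted
//   }
//
// Here 'w' is allocated in ReturnValue rather than in its own alloca; the
// non-trivial destructor additionally requires the NRVO flag created below.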
1460 address = ReturnValue;
1461
1462 if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1463 const auto *RD = RecordTy->getDecl();
1464 const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1465 if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1466 RD->isNonTrivialToPrimitiveDestroy()) {
1467 // Create a flag that is used to indicate when the NRVO was applied
1468 // to this variable. Set it to zero to indicate that NRVO was not
1469 // applied.
1470 llvm::Value *Zero = Builder.getFalse();
1471 Address NRVOFlag =
1472 CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1473 EnsureInsertPoint();
1474 Builder.CreateStore(Zero, NRVOFlag);
1475
1476 // Record the NRVO flag for this variable.
1477 NRVOFlags[&D] = NRVOFlag.getPointer();
1478 emission.NRVOFlag = NRVOFlag.getPointer();
1479 }
1480 }
1481 } else {
1482 CharUnits allocaAlignment;
1483 llvm::Type *allocaTy;
1484 if (isEscapingByRef) {
1485 auto &byrefInfo = getBlockByrefInfo(&D);
1486 allocaTy = byrefInfo.Type;
1487 allocaAlignment = byrefInfo.ByrefAlignment;
1488 } else {
1489 allocaTy = ConvertTypeForMem(Ty);
1490 allocaAlignment = alignment;
1491 }
1492
1493 // Create the alloca. Note that we set the name separately from
1494 // building the instruction so that it's there even in no-asserts
1495 // builds.
1496 address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1497 /*ArraySize=*/nullptr, &AllocaAddr);
1498
1499 // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1500 // the catch parameter starts in the catchpad instruction, and we can't
1501 // insert code in those basic blocks.
1502 bool IsMSCatchParam =
1503 D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1504
1505 // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1506 // if we don't have a valid insertion point (?).
1507 if (HaveInsertPoint() && !IsMSCatchParam) {
1508 // If there's a jump into the lifetime of this variable, its lifetime
1509 // gets broken up into several regions in IR, which requires more work
1510 // to handle correctly. For now, just omit the intrinsics; this is a
1511 // rare case, and it's better to just be conservatively correct.
1512 // PR28267.
1513 //
1514 // We have to do this in all language modes if there's a jump past the
1515 // declaration. We also have to do it in C if there's a jump to an
1516 // earlier point in the current block because non-VLA lifetimes begin as
1517 // soon as the containing block is entered, not when its variables
1518 // actually come into scope; suppressing the lifetime annotations
1519 // completely in this case is unnecessarily pessimistic, but again, this
1520 // is rare.
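// Illustrative example (editor's note): lifetime markers are omitted for 'x'
// here because the goto jumps past its declaration and would otherwise split
// its lifetime into multiple IR regions:
//
//   void f(int c) {
//     if (c) goto skip;
//     int x = 0;
//     (void)x;
//   skip:;
//   }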
1521 if (!Bypasses.IsBypassed(&D) &&
1522 !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1523 llvm::TypeSize size =
1524 CGM.getDataLayout().getTypeAllocSize(allocaTy);
1525 emission.SizeForLifetimeMarkers =
1526 size.isScalable() ? EmitLifetimeStart(-1, AllocaAddr.getPointer())
1527 : EmitLifetimeStart(size.getFixedSize(),
1528 AllocaAddr.getPointer());
1529 }
1530 } else {
1531 assert(!emission.useLifetimeMarkers());
1532 }
1533 }
1534 } else {
1535 EnsureInsertPoint();
1536
1537 if (!DidCallStackSave) {
1538 // Save the stack.
1539 Address Stack =
1540 CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
1541
1542 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
1543 llvm::Value *V = Builder.CreateCall(F);
1544 Builder.CreateStore(V, Stack);
1545
1546 DidCallStackSave = true;
1547
1548 // Push a cleanup block and restore the stack there.
1549 // FIXME: in general circumstances, this should be an EH cleanup.
1550 pushStackRestore(NormalCleanup, Stack);
1551 }
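// Editor's sketch (approximate IR, not from the original source): for
//
//   void f(int n) { int vla[n]; }
//
// the emitted pattern is roughly
//
//   %saved_stack = alloca i8*
//   %sp = call i8* @llvm.stacksave()
//   store i8* %sp, i8** %saved_stack
//   %vla = alloca i32, i64 %n
//   ...
//   %sp2 = load i8*, i8** %saved_stack      ; in the stack-restore cleanup
//   call void @llvm.stackrestore(i8* %sp2)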
1552
1553 auto VlaSize = getVLASize(Ty);
1554 llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1555
1556 // Allocate memory for the array.
1557 address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1558 &AllocaAddr);
1559
1560 // If we have debug info enabled, properly describe the VLA dimensions for
1561 // this type by registering the vla size expression for each of the
1562 // dimensions.
1563 EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1564 }
1565
1566 setAddrOfLocalVar(&D, address);
1567 emission.Addr = address;
1568 emission.AllocaAddr = AllocaAddr;
1569
1570 // Emit debug info for local var declaration.
1571 if (EmitDebugInfo && HaveInsertPoint()) {
1572 Address DebugAddr = address;
1573 bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1574 DI->setLocation(D.getLocation());
1575
1576 // If NRVO, use a pointer to the return address.
1577 if (UsePointerValue)
1578 DebugAddr = ReturnValuePointer;
1579
1580 (void)DI->EmitDeclareOfAutoVariable(&D, DebugAddr.getPointer(), Builder,
1581 UsePointerValue);
1582 }
1583
1584 if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1585 EmitVarAnnotations(&D, address.getPointer());
1586
1587 // Make sure we call @llvm.lifetime.end.
1588 if (emission.useLifetimeMarkers())
1589 EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
1590 emission.getOriginalAllocatedAddress(),
1591 emission.getSizeForLifetimeMarkers());
1592
1593 return emission;
1594 }
1595
1596 static bool isCapturedBy(const VarDecl &, const Expr *);
1597
1598 /// Determines whether the given __block variable is potentially
1599 /// captured by the given statement.
1600 static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1601 if (const Expr *E = dyn_cast<Expr>(S))
1602 return isCapturedBy(Var, E);
1603 for (const Stmt *SubStmt : S->children())
1604 if (isCapturedBy(Var, SubStmt))
1605 return true;
1606 return false;
1607 }
1608
1609 /// Determines whether the given __block variable is potentially
1610 /// captured by the given expression.
1611 static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1612 // Skip the most common kinds of expressions that make
1613 // hierarchy-walking expensive.
1614 E = E->IgnoreParenCasts();
1615
1616 if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1617 const BlockDecl *Block = BE->getBlockDecl();
1618 for (const auto &I : Block->captures()) {
1619 if (I.getVariable() == &Var)
1620 return true;
1621 }
1622
1623 // No need to walk into the subexpressions.
1624 return false;
1625 }
1626
1627 if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1628 const CompoundStmt *CS = SE->getSubStmt();
1629 for (const auto *BI : CS->body())
1630 if (const auto *BIE = dyn_cast<Expr>(BI)) {
1631 if (isCapturedBy(Var, BIE))
1632 return true;
1633 }
1634 else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1635 // special case declarations
1636 for (const auto *I : DS->decls()) {
1637 if (const auto *VD = dyn_cast<VarDecl>((I))) {
1638 const Expr *Init = VD->getInit();
1639 if (Init && isCapturedBy(Var, Init))
1640 return true;
1641 }
1642 }
1643 }
1644 else
1645 // FIXME: Conservatively assume that arbitrary statements cause capturing.
1646 // Later, provide code to poke into statements for capture analysis.
1647 return true;
1648 return false;
1649 }
1650
1651 for (const Stmt *SubStmt : E->children())
1652 if (isCapturedBy(Var, SubStmt))
1653 return true;
1654
1655 return false;
1656 }
1657
1658 /// Determine whether the given initializer is trivial in the sense
1659 /// that it requires no code to be generated.
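///
/// For example (editor's illustration):
/// \code
///   struct S { int x; };
///   S s;    // implicit default construction: trivial, no code emitted
///   S t{};  // value-initialization: not trivial, zero-init is required
/// \endcode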
1660 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1661 if (!Init)
1662 return true;
1663
1664 if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1665 if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1666 if (Constructor->isTrivial() &&
1667 Constructor->isDefaultConstructor() &&
1668 !Construct->requiresZeroInitialization())
1669 return true;
1670
1671 return false;
1672 }
1673
1674 void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1675 const VarDecl &D,
1676 Address Loc) {
1677 auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1678 CharUnits Size = getContext().getTypeSizeInChars(type);
1679 bool isVolatile = type.isVolatileQualified();
1680 if (!Size.isZero()) {
1681 switch (trivialAutoVarInit) {
1682 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1683 llvm_unreachable("Uninitialized handled by caller");
1684 case LangOptions::TrivialAutoVarInitKind::Zero:
1685 if (CGM.stopAutoInit())
1686 return;
1687 emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1688 break;
1689 case LangOptions::TrivialAutoVarInitKind::Pattern:
1690 if (CGM.stopAutoInit())
1691 return;
1692 emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1693 break;
1694 }
1695 return;
1696 }
1697
1698 // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1699 // them, so emit a memcpy with the VLA size to initialize each element.
1700 // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1701 // will catch that code, but there exists code which generates zero-sized
1702 // VLAs. Be nice and initialize whatever they requested.
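// Illustrative example (editor's note): under -ftrivial-auto-var-init=zero,
//
//   void f(int n) { int vla[n]; }
//
// is initialized with a single memset of n * sizeof(int) bytes, while
// -ftrivial-auto-var-init=pattern emits the element-by-element memcpy loop
// built below ("vla-setup.loop" / "vla-init.loop").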
1703 const auto *VlaType = getContext().getAsVariableArrayType(type);
1704 if (!VlaType)
1705 return;
1706 auto VlaSize = getVLASize(VlaType);
1707 auto SizeVal = VlaSize.NumElts;
1708 CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1709 switch (trivialAutoVarInit) {
1710 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1711 llvm_unreachable("Uninitialized handled by caller");
1712
1713 case LangOptions::TrivialAutoVarInitKind::Zero:
1714 if (CGM.stopAutoInit())
1715 return;
1716 if (!EltSize.isOne())
1717 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1718 Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
1719 isVolatile);
1720 break;
1721
1722 case LangOptions::TrivialAutoVarInitKind::Pattern: {
1723 if (CGM.stopAutoInit())
1724 return;
1725 llvm::Type *ElTy = Loc.getElementType();
1726 llvm::Constant *Constant = constWithPadding(
1727 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1728 CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1729 llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1730 llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1731 llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1732 llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1733 SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1734 "vla.iszerosized");
1735 Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1736 EmitBlock(SetupBB);
1737 if (!EltSize.isOne())
1738 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1739 llvm::Value *BaseSizeInChars =
1740 llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1741 Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
1742 llvm::Value *End =
1743 Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
1744 llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1745 EmitBlock(LoopBB);
1746 llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1747 Cur->addIncoming(Begin.getPointer(), OriginBB);
1748 CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1749 Builder.CreateMemCpy(Address(Cur, CurAlign),
1750 createUnnamedGlobalForMemcpyFrom(
1751 CGM, D, Builder, Constant, ConstantAlign),
1752 BaseSizeInChars, isVolatile);
1753 llvm::Value *Next =
1754 Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1755 llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1756 Builder.CreateCondBr(Done, ContBB, LoopBB);
1757 Cur->addIncoming(Next, LoopBB);
1758 EmitBlock(ContBB);
1759 } break;
1760 }
1761 }
1762
1763 void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1764 assert(emission.Variable && "emission was not valid!");
1765
1766 // If this was emitted as a global constant, we're done.
1767 if (emission.wasEmittedAsGlobal()) return;
1768
1769 const VarDecl &D = *emission.Variable;
1770 auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
1771 QualType type = D.getType();
1772
1773 // If this local has an initializer, emit it now.
1774 const Expr *Init = D.getInit();
1775
1776 // If we are at an unreachable point, we don't need to emit the initializer
1777 // unless it contains a label.
1778 if (!HaveInsertPoint()) {
1779 if (!Init || !ContainsLabel(Init)) return;
1780 EnsureInsertPoint();
1781 }
1782
1783 // Initialize the structure of a __block variable.
1784 if (emission.IsEscapingByRef)
1785 emitByrefStructureInit(emission);
1786
1787 // Initialize the variable here if it doesn't have an initializer and it is a
1788 // C struct that is non-trivial to initialize or an array containing such a
1789 // struct.
1790 if (!Init &&
1791 type.isNonTrivialToPrimitiveDefaultInitialize() ==
1792 QualType::PDIK_Struct) {
1793 LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1794 if (emission.IsEscapingByRef)
1795 drillIntoBlockVariable(*this, Dst, &D);
1796 defaultInitNonTrivialCStructVar(Dst);
1797 return;
1798 }
1799
1800 // Check whether this is a byref variable that's potentially
1801 // captured and moved by its own initializer. If so, we'll need to
1802 // emit the initializer first, then copy into the variable.
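// Illustrative example (editor's note): the initializer below captures the
// __block variable it initializes, so the block literal must be built before
// the final store into the byref slot:
//
//   __block int (^fib)(int) = ^int(int n) {
//     return n < 2 ? n : fib(n - 1) + fib(n - 2);
//   };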
1803 bool capturedByInit =
1804 Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1805
1806 bool locIsByrefHeader = !capturedByInit;
1807 const Address Loc =
1808 locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1809
1810 // Note: constexpr already initializes everything correctly.
1811 LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1812 (D.isConstexpr()
1813 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1814 : (D.getAttr<UninitializedAttr>()
1815 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1816 : getContext().getLangOpts().getTrivialAutoVarInit()));
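// Illustrative example (editor's note): with -ftrivial-auto-var-init=pattern,
//
//   int a;                           // pattern-initialized
//   int b [[clang::uninitialized]];  // explicitly left uninitialized
//   constexpr int c = 3;             // constexpr: its initializer covers it
//
// only 'a' receives the synthetic initialization selected above.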
1817
1818 auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1819 if (trivialAutoVarInit ==
1820 LangOptions::TrivialAutoVarInitKind::Uninitialized)
1821 return;
1822
1823 // Only initialize a __block's storage: we always initialize the header.
1824 if (emission.IsEscapingByRef && !locIsByrefHeader)
1825 Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
1826
1827 return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1828 };
1829
1830 if (isTrivialInitializer(Init))
1831 return initializeWhatIsTechnicallyUninitialized(Loc);
1832
1833 llvm::Constant *constant = nullptr;
1834 if (emission.IsConstantAggregate ||
1835 D.mightBeUsableInConstantExpressions(getContext())) {
1836 assert(!capturedByInit && "constant init contains a capturing block?");
1837 constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1838 if (constant && !constant->isZeroValue() &&
1839 (trivialAutoVarInit !=
1840 LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1841 IsPattern isPattern =
1842 (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1843 ? IsPattern::Yes
1844 : IsPattern::No;
1845 // C guarantees that brace-init with fewer initializers than members in
1846 // the aggregate will initialize the rest of the aggregate as-if it were
1847 // static initialization. In turn static initialization guarantees that
1848 // padding is initialized to zero bits. We could instead pattern-init if D
1849 // has any ImplicitValueInitExpr, but that seems to be unintuitive
1850 // behavior.
1851 constant = constWithPadding(CGM, IsPattern::No,
1852 replaceUndef(CGM, isPattern, constant));
1853 }
1854 }
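// Illustrative example (editor's note): in C,
//
//   struct Padded { char c; /* padding */ int i; };
//   struct Padded p = {'x'};   // local variable
//
// 'i' is zero-initialized as-if statically initialized, and the call above
// additionally forces the padding bytes of the emitted constant to zero
// rather than leaving them undef.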
1855
1856 if (!constant) {
1857 initializeWhatIsTechnicallyUninitialized(Loc);
1858 LValue lv = MakeAddrLValue(Loc, type);
1859 lv.setNonGC(true);
1860 return EmitExprAsInit(Init, &D, lv, capturedByInit);
1861 }
1862
1863 if (!emission.IsConstantAggregate) {
1864 // For simple scalar/complex initialization, store the value directly.
1865 LValue lv = MakeAddrLValue(Loc, type);
1866 lv.setNonGC(true);
1867 return EmitStoreThroughLValue(RValue::get(constant), lv, true);
1868 }
1869
1870 llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
1871 emitStoresForConstant(
1872 CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
1873 type.isVolatileQualified(), Builder, constant);
1874 }
1875
1876 /// Emit an expression as an initializer for an object (variable, field, etc.)
1877 /// at the given location. The expression is not necessarily the normal
1878 /// initializer for the object, and the address is not necessarily
1879 /// its normal location.
1880 ///
1881 /// \param init the initializing expression
1882 /// \param D the object to act as if we're initializing
1883 /// \param lvalue the lvalue to initialize
1884 /// \param capturedByInit true if \p D is a __block variable
1885 /// whose address is potentially changed by the initializer
1886 void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
1887 LValue lvalue, bool capturedByInit) {
1888 QualType type = D->getType();
1889
1890 if (type->isReferenceType()) {
1891 RValue rvalue = EmitReferenceBindingToExpr(init);
1892 if (capturedByInit)
1893 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
1894 EmitStoreThroughLValue(rvalue, lvalue, true);
1895 return;
1896 }
1897 switch (getEvaluationKind(type)) {
1898 case TEK_Scalar:
1899 EmitScalarInit(init, D, lvalue, capturedByInit);
1900 return;
1901 case TEK_Complex: {
1902 ComplexPairTy complex = EmitComplexExpr(init);
1903 if (capturedByInit)
1904 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
1905 EmitStoreOfComplex(complex, lvalue, /*init*/ true);
1906 return;
1907 }
1908 case TEK_Aggregate:
1909 if (type->isAtomicType()) {
1910 EmitAtomicInit(const_cast<Expr*>(init), lvalue);
1911 } else {
1912 AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
1913 if (isa<VarDecl>(D))
1914 Overlap = AggValueSlot::DoesNotOverlap;
1915 else if (auto *FD = dyn_cast<FieldDecl>(D))
1916 Overlap = getOverlapForFieldInit(FD);
1917 // TODO: how can we delay here if D is captured by its initializer?
1918 EmitAggExpr(init, AggValueSlot::forLValue(
1919 lvalue, *this, AggValueSlot::IsDestructed,
1920 AggValueSlot::DoesNotNeedGCBarriers,
1921 AggValueSlot::IsNotAliased, Overlap));
1922 }
1923 return;
1924 }
1925 llvm_unreachable("bad evaluation kind");
1926 }
1927
1928 /// Enter a destroy cleanup for the given local variable.
1929 void CodeGenFunction::emitAutoVarTypeCleanup(
1930 const CodeGenFunction::AutoVarEmission &emission,
1931 QualType::DestructionKind dtorKind) {
1932 assert(dtorKind != QualType::DK_none);
1933
1934 // Note that for __block variables, we want to destroy the
1935 // original stack object, not the possibly forwarded object.
1936 Address addr = emission.getObjectAddress(*this);
1937
1938 const VarDecl *var = emission.Variable;
1939 QualType type = var->getType();
1940
1941 CleanupKind cleanupKind = NormalAndEHCleanup;
1942 CodeGenFunction::Destroyer *destroyer = nullptr;
1943
1944 switch (dtorKind) {
1945 case QualType::DK_none:
1946 llvm_unreachable("no cleanup for trivially-destructible variable");
1947
1948 case QualType::DK_cxx_destructor:
1949 // If there's an NRVO flag on the emission, we need a different
1950 // cleanup.
1951 if (emission.NRVOFlag) {
1952 assert(!type->isArrayType());
1953 CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
1954 EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
1955 emission.NRVOFlag);
1956 return;
1957 }
1958 break;
1959
1960 case QualType::DK_objc_strong_lifetime:
1961 // Suppress cleanups for pseudo-strong variables.
1962 if (var->isARCPseudoStrong()) return;
1963
1964 // Otherwise, consider whether to use an EH cleanup or not.
1965 cleanupKind = getARCCleanupKind();
1966
1967 // Use the imprecise destroyer by default.
1968 if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
1969 destroyer = CodeGenFunction::destroyARCStrongImprecise;
1970 break;
1971
1972 case QualType::DK_objc_weak_lifetime:
1973 break;
1974
1975 case QualType::DK_nontrivial_c_struct:
1976 destroyer = CodeGenFunction::destroyNonTrivialCStruct;
1977 if (emission.NRVOFlag) {
1978 assert(!type->isArrayType());
1979 EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
1980 emission.NRVOFlag, type);
1981 return;
1982 }
1983 break;
1984 }
1985
1986 // If we haven't chosen a more specific destroyer, use the default.
1987 if (!destroyer) destroyer = getDestroyer(dtorKind);
1988
1989 // Use an EH cleanup in array destructors iff the destructor itself
1990 // is being pushed as an EH cleanup.
1991 bool useEHCleanup = (cleanupKind & EHCleanup);
1992 EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
1993 useEHCleanup);
1994 }
1995
1996 void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
1997 assert(emission.Variable && "emission was not valid!");
1998
1999 // If this was emitted as a global constant, we're done.
2000 if (emission.wasEmittedAsGlobal()) return;
2001
2002 // If we don't have an insertion point, we're done. Sema prevents
2003 // us from jumping into any of these scopes anyway.
2004 if (!HaveInsertPoint()) return;
2005
2006 const VarDecl &D = *emission.Variable;
2007
2008 // Check the type for a cleanup.
2009 if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
2010 emitAutoVarTypeCleanup(emission, dtorKind);
2011
2012 // In GC mode, honor objc_precise_lifetime.
2013 if (getLangOpts().getGC() != LangOptions::NonGC &&
2014 D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2015 EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
2016 }
2017
2018 // Handle the cleanup attribute.
2019 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2020 const FunctionDecl *FD = CA->getFunctionDecl();
2021
2022 llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2023 assert(F && "Could not find function!");
2024
2025 const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2026 EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
2027 }
2028
2029 // If this is a block variable, call _Block_object_destroy
2030 // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2031 // mode.
2032 if (emission.IsEscapingByRef &&
2033 CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2034 BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2035 if (emission.Variable->getType().isObjCGCWeak())
2036 Flags |= BLOCK_FIELD_IS_WEAK;
2037 enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2038 /*LoadBlockVarAddr*/ false,
2039 cxxDestructorCanThrow(emission.Variable->getType()));
2040 }
2041 }
2042
2043 CodeGenFunction::Destroyer *
2044 CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2045 switch (kind) {
2046 case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2047 case QualType::DK_cxx_destructor:
2048 return destroyCXXObject;
2049 case QualType::DK_objc_strong_lifetime:
2050 return destroyARCStrongPrecise;
2051 case QualType::DK_objc_weak_lifetime:
2052 return destroyARCWeak;
2053 case QualType::DK_nontrivial_c_struct:
2054 return destroyNonTrivialCStruct;
2055 }
2056 llvm_unreachable("Unknown DestructionKind");
2057 }
2058
2059 /// pushEHDestroy - Push the standard destructor for the given type as
2060 /// an EH-only cleanup.
2061 void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2062 Address addr, QualType type) {
2063 assert(dtorKind && "cannot push destructor for trivial type");
2064 assert(needsEHCleanup(dtorKind));
2065
2066 pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2067 }
2068
2069 /// pushDestroy - Push the standard destructor for the given type as
2070 /// at least a normal cleanup.
2071 void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2072 Address addr, QualType type) {
2073 assert(dtorKind && "cannot push destructor for trivial type");
2074
2075 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2076 pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2077 cleanupKind & EHCleanup);
2078 }
2079
2080 void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2081 QualType type, Destroyer *destroyer,
2082 bool useEHCleanupForArray) {
2083 pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
2084 destroyer, useEHCleanupForArray);
2085 }
2086
2087 void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2088 EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2089 }
2090
2091 void CodeGenFunction::pushLifetimeExtendedDestroy(
2092 CleanupKind cleanupKind, Address addr, QualType type,
2093 Destroyer *destroyer, bool useEHCleanupForArray) {
2094 // Push an EH-only cleanup for the object now.
2095 // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2096 // around in case a temporary's destructor throws an exception.
2097 if (cleanupKind & EHCleanup)
2098 EHStack.pushCleanup<DestroyObject>(
2099 static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
2100 destroyer, useEHCleanupForArray);
2101
2102 // Remember that we need to push a full cleanup for the object at the
2103 // end of the full-expression.
2104 pushCleanupAfterFullExpr<DestroyObject>(
2105 cleanupKind, addr, type, destroyer, useEHCleanupForArray);
2106 }
2107
2108 /// emitDestroy - Immediately perform the destruction of the given
2109 /// object.
2110 ///
2111 /// \param addr - the address of the object; a type*
2112 /// \param type - the type of the object; if an array type, all
2113 /// objects are destroyed in reverse order
2114 /// \param destroyer - the function to call to destroy individual
2115 /// elements
2116 /// \param useEHCleanupForArray - whether an EH cleanup should be
2117 /// used when destroying array elements, in case one of the
2118 /// destructions throws an exception
2119 void CodeGenFunction::emitDestroy(Address addr, QualType type,
2120 Destroyer *destroyer,
2121 bool useEHCleanupForArray) {
2122 const ArrayType *arrayType = getContext().getAsArrayType(type);
2123 if (!arrayType)
2124 return destroyer(*this, addr, type);
2125
2126 llvm::Value *length = emitArrayLength(arrayType, type, addr);
2127
2128 CharUnits elementAlign =
2129 addr.getAlignment()
2130 .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2131
2132 // Normally we have to check whether the array is zero-length.
2133 bool checkZeroLength = true;
2134
2135 // But if the array length is constant, we can suppress that.
2136 if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2137 // ...and if it's constant zero, we can just skip the entire thing.
2138 if (constLength->isZero()) return;
2139 checkZeroLength = false;
2140 }
2141
2142 llvm::Value *begin = addr.getPointer();
2143 llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
2144 emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2145 checkZeroLength, useEHCleanupForArray);
2146 }
2147
2148 /// emitArrayDestroy - Destroys all the elements of the given array,
2149 /// beginning from last to first. The array cannot be zero-length.
2150 ///
2151 /// \param begin - a type* denoting the first element of the array
2152 /// \param end - a type* denoting one past the end of the array
2153 /// \param elementType - the element type of the array
2154 /// \param destroyer - the function to call to destroy elements
2155 /// \param useEHCleanup - whether to push an EH cleanup to destroy
2156 /// the remaining elements in case the destruction of a single
2157 /// element throws
2158 void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2159 llvm::Value *end,
2160 QualType elementType,
2161 CharUnits elementAlign,
2162 Destroyer *destroyer,
2163 bool checkZeroLength,
2164 bool useEHCleanup) {
2165 assert(!elementType->isArrayType());
2166
2167 // The basic structure here is a do-while loop, because we don't
2168 // need to check for the zero-element case.
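// Editor's sketch of the emitted control flow (approximate pseudo-IR):
//
//   entry:                 ; br i1 %isempty, %arraydestroy.done, %arraydestroy.body (if checked)
//   arraydestroy.body:
//     %elementPast = phi [ %end, %entry ], [ %element, %arraydestroy.body ]
//     %element     = gep %elementPast, -1
//     call destroyer(%element)
//     %done = icmp eq %element, %begin
//     br i1 %done, label %arraydestroy.done, label %arraydestroy.body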
2169 llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2170 llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2171
2172 if (checkZeroLength) {
2173 llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2174 "arraydestroy.isempty");
2175 Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2176 }
2177
2178 // Enter the loop body, making that address the current address.
2179 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2180 EmitBlock(bodyBB);
2181 llvm::PHINode *elementPast =
2182 Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2183 elementPast->addIncoming(end, entryBB);
2184
2185 // Shift the address back by one element.
2186 llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2187 llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
2188 "arraydestroy.element");
2189
2190 if (useEHCleanup)
2191 pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2192 destroyer);
2193
2194 // Perform the actual destruction there.
2195 destroyer(*this, Address(element, elementAlign), elementType);
2196
2197 if (useEHCleanup)
2198 PopCleanupBlock();
2199
2200 // Check whether we've reached the end.
2201 llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2202 Builder.CreateCondBr(done, doneBB, bodyBB);
2203 elementPast->addIncoming(element, Builder.GetInsertBlock());
2204
2205 // Done.
2206 EmitBlock(doneBB);
2207 }
2208
2209 /// Perform partial array destruction as if in an EH cleanup. Unlike
2210 /// emitArrayDestroy, the element type here may still be an array type.
2211 static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2212 llvm::Value *begin, llvm::Value *end,
2213 QualType type, CharUnits elementAlign,
2214 CodeGenFunction::Destroyer *destroyer) {
2215 // If the element type is itself an array, drill down.
2216 unsigned arrayDepth = 0;
2217 while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2218 // VLAs don't require a GEP index to walk into.
2219 if (!isa<VariableArrayType>(arrayType))
2220 arrayDepth++;
2221 type = arrayType->getElementType();
2222 }
2223
2224 if (arrayDepth) {
2225 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2226
2227 SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2228 begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
2229 end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
2230 }
2231
2232 // Destroy the array. We don't ever need an EH cleanup because we
2233 // assume that we're in an EH cleanup ourselves, so a throwing
2234 // destructor causes an immediate terminate.
2235 CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2236 /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2237 }
2238
2239 namespace {
2240 /// RegularPartialArrayDestroy - a cleanup which performs a partial
2241 /// array destroy where the end pointer is regularly determined and
2242 /// does not need to be loaded from a local.
2243 class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2244 llvm::Value *ArrayBegin;
2245 llvm::Value *ArrayEnd;
2246 QualType ElementType;
2247 CodeGenFunction::Destroyer *Destroyer;
2248 CharUnits ElementAlign;
2249 public:
2250 RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2251 QualType elementType, CharUnits elementAlign,
2252 CodeGenFunction::Destroyer *destroyer)
2253 : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2254 ElementType(elementType), Destroyer(destroyer),
2255 ElementAlign(elementAlign) {}
2256
2257 void Emit(CodeGenFunction &CGF, Flags flags) override {
2258 emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2259 ElementType, ElementAlign, Destroyer);
2260 }
2261 };
2262
2263 /// IrregularPartialArrayDestroy - a cleanup which performs a
2264 /// partial array destroy where the end pointer is irregularly
2265 /// determined and must be loaded from a local.
2266 class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2267 llvm::Value *ArrayBegin;
2268 Address ArrayEndPointer;
2269 QualType ElementType;
2270 CodeGenFunction::Destroyer *Destroyer;
2271 CharUnits ElementAlign;
2272 public:
2273 IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2274 Address arrayEndPointer,
2275 QualType elementType,
2276 CharUnits elementAlign,
2277 CodeGenFunction::Destroyer *destroyer)
2278 : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2279 ElementType(elementType), Destroyer(destroyer),
2280 ElementAlign(elementAlign) {}
2281
2282 void Emit(CodeGenFunction &CGF, Flags flags) override {
2283 llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2284 emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2285 ElementType, ElementAlign, Destroyer);
2286 }
2287 };
2288 } // end anonymous namespace
2289
2290 /// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
2291 /// already-constructed elements of the given array. The cleanup
2292 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2293 ///
2294 /// \param elementType - the immediate element type of the array;
2295 /// possibly still an array type
2296 void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2297 Address arrayEndPointer,
2298 QualType elementType,
2299 CharUnits elementAlign,
2300 Destroyer *destroyer) {
2301 pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
2302 arrayBegin, arrayEndPointer,
2303 elementType, elementAlign,
2304 destroyer);
2305 }
2306
2307 /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2308 /// already-constructed elements of the given array. The cleanup
2309 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2310 ///
2311 /// \param elementType - the immediate element type of the array;
2312 /// possibly still an array type
2313 void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2314 llvm::Value *arrayEnd,
2315 QualType elementType,
2316 CharUnits elementAlign,
2317 Destroyer *destroyer) {
2318 pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2319 arrayBegin, arrayEnd,
2320 elementType, elementAlign,
2321 destroyer);
2322 }
2323
2324 /// Lazily declare the @llvm.lifetime.start intrinsic.
2325 llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2326 if (LifetimeStartFn)
2327 return LifetimeStartFn;
2328 LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
2329 llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2330 return LifetimeStartFn;
2331 }
2332
2333 /// Lazily declare the @llvm.lifetime.end intrinsic.
2334 llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2335 if (LifetimeEndFn)
2336 return LifetimeEndFn;
2337 LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
2338 llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2339 return LifetimeEndFn;
2340 }
2341
2342 namespace {
2343 /// A cleanup to perform a release of an object at the end of a
2344 /// function. This is used to balance out the incoming +1 of a
2345 /// ns_consumed argument when we can't reasonably do that just by
2346 /// not doing the initial retain for a __block argument.
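///
/// Editor's illustration: for a parameter declared as
/// \code
///   void take(__attribute__((ns_consumed)) id obj);
/// \endcode
/// the caller passes 'obj' at +1; when the callee cannot absorb that +1 by
/// simply skipping its initial retain, this cleanup emits the balancing
/// release on function exit.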
2347 struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2348 ConsumeARCParameter(llvm::Value *param,
2349 ARCPreciseLifetime_t precise)
2350 : Param(param), Precise(precise) {}
2351
2352 llvm::Value *Param;
2353 ARCPreciseLifetime_t Precise;
2354
2355 void Emit(CodeGenFunction &CGF, Flags flags) override {
2356 CGF.EmitARCRelease(Param, Precise);
2357 }
2358 };
2359 } // end anonymous namespace
2360
2361 /// Emit an alloca (or GlobalValue depending on target)
2362 /// for the specified parameter and set up LocalDeclMap.
2363 void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2364 unsigned ArgNo) {
2365 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2366 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2367 "Invalid argument to EmitParmDecl");
2368
2369 Arg.getAnyValue()->setName(D.getName());
2370
2371 QualType Ty = D.getType();
2372
2373 // Use better IR generation for certain implicit parameters.
2374 if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
2375 // The only implicit argument a block has is its literal.
2376 // This may be passed as an inalloca'ed value on Windows x86.
2377 if (BlockInfo) {
2378 llvm::Value *V = Arg.isIndirect()
2379 ? Builder.CreateLoad(Arg.getIndirectAddress())
2380 : Arg.getDirectValue();
2381 setBlockContextParameter(IPD, ArgNo, V);
2382 return;
2383 }
2384 }
2385
2386 Address DeclPtr = Address::invalid();
2387 bool DoStore = false;
2388 bool IsScalar = hasScalarEvaluationKind(Ty);
2389 // If we already have a pointer to the argument, reuse the input pointer.
2390 if (Arg.isIndirect()) {
2391 DeclPtr = Arg.getIndirectAddress();
2392 // If we have a prettier pointer type at this point, bitcast to that.
2393 unsigned AS = DeclPtr.getType()->getAddressSpace();
2394 llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
2395 if (DeclPtr.getType() != IRTy)
2396 DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
2397 // Indirect argument is in alloca address space, which may be different
2398 // from the default address space.
2399 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2400 auto *V = DeclPtr.getPointer();
2401 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2402 auto DestLangAS =
2403 getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2404 if (SrcLangAS != DestLangAS) {
2405 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2406 CGM.getDataLayout().getAllocaAddrSpace());
2407 auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
2408 auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
2409 DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
2410 *this, V, SrcLangAS, DestLangAS, T, true),
2411 DeclPtr.getAlignment());
2412 }
2413
2414 // Push a destructor cleanup for this parameter if the ABI requires it.
2415 // Don't push a cleanup in a thunk for a method that will also emit a
2416 // cleanup.
2417 if (hasAggregateEvaluationKind(Ty) && !CurFuncIsThunk &&
2418 Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2419 if (QualType::DestructionKind DtorKind =
2420 D.needsDestruction(getContext())) {
2421 assert((DtorKind == QualType::DK_cxx_destructor ||
2422 DtorKind == QualType::DK_nontrivial_c_struct) &&
2423 "unexpected destructor type");
2424 pushDestroy(DtorKind, DeclPtr, Ty);
2425 CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
2426 EHStack.stable_begin();
2427 }
2428 }
2429 } else {
2430 // Check if the parameter address is controlled by OpenMP runtime.
2431 Address OpenMPLocalAddr =
2432 getLangOpts().OpenMP
2433 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
2434 : Address::invalid();
2435 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2436 DeclPtr = OpenMPLocalAddr;
2437 } else {
2438 // Otherwise, create a temporary to hold the value.
2439 DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2440 D.getName() + ".addr");
2441 }
2442 DoStore = true;
2443 }
2444
2445 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2446
2447 LValue lv = MakeAddrLValue(DeclPtr, Ty);
2448 if (IsScalar) {
2449 Qualifiers qs = Ty.getQualifiers();
2450 if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2451 // We honor __attribute__((ns_consumed)) for types with lifetime.
2452 // For __strong, it's handled by just skipping the initial retain;
2453 // otherwise we have to balance out the initial +1 with an extra
2454 // cleanup to do the release at the end of the function.
2455 bool isConsumed = D.hasAttr<NSConsumedAttr>();
2456
2457 // If a parameter is pseudo-strong then we can omit the implicit retain.
2458 if (D.isARCPseudoStrong()) {
2459 assert(lt == Qualifiers::OCL_Strong &&
2460 "pseudo-strong variable isn't strong?");
2461 assert(qs.hasConst() && "pseudo-strong variable should be const!");
2462 lt = Qualifiers::OCL_ExplicitNone;
2463 }
2464
2465 // Load objects passed indirectly.
2466 if (Arg.isIndirect() && !ArgVal)
2467 ArgVal = Builder.CreateLoad(DeclPtr);
2468
2469 if (lt == Qualifiers::OCL_Strong) {
2470 if (!isConsumed) {
2471 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2472 // use objc_storeStrong(&dest, value) for retaining the
2473 // object. But first, store a null into 'dest' because
2474 // objc_storeStrong attempts to release its old value.
2475 llvm::Value *Null = CGM.EmitNullConstant(D.getType());
2476 EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
2477 EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
2478 DoStore = false;
2479 }
2480 else
2481 // Don't use objc_retainBlock for block pointers, because we
2482 // don't want to Block_copy something just because we got it
2483 // as a parameter.
2484 ArgVal = EmitARCRetainNonBlock(ArgVal);
2485 }
2486 } else {
2487 // Push the cleanup for a consumed parameter.
2488 if (isConsumed) {
2489 ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2490 ? ARCPreciseLifetime : ARCImpreciseLifetime);
2491 EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2492 precise);
2493 }
2494
2495 if (lt == Qualifiers::OCL_Weak) {
2496 EmitARCInitWeak(DeclPtr, ArgVal);
2497 DoStore = false; // The weak init is a store, no need to do two.
2498 }
2499 }
2500
2501 // Enter the cleanup scope.
2502 EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
2503 }
2504 }
2505
2506 // Store the initial value into the alloca.
2507 if (DoStore)
2508 EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
2509
2510 setAddrOfLocalVar(&D, DeclPtr);
2511
2512 // Emit debug info for param declarations in non-thunk functions.
2513 if (CGDebugInfo *DI = getDebugInfo()) {
2514 if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk) {
2515 DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
2516 }
2517 }
2518
2519 if (D.hasAttr<AnnotateAttr>())
2520 EmitVarAnnotations(&D, DeclPtr.getPointer());
2521
2522 // We can only check return value nullability if all arguments to the
2523 // function satisfy their nullability preconditions. This makes it necessary
2524 // to emit null checks for args in the function body itself.
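// Illustrative example (editor's note): for
//
//   int *_Nonnull f(int *_Nonnull p);
//
// the nullability check on f's return value is only meaningful if 'p'
// actually satisfied its own _Nonnull precondition, so the argument is
// checked here and folded into RetValNullabilityPrecondition.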
2525 if (requiresReturnValueNullabilityCheck()) {
2526 auto Nullability = Ty->getNullability(getContext());
2527 if (Nullability && *Nullability == NullabilityKind::NonNull) {
2528 SanitizerScope SanScope(this);
2529 RetValNullabilityPrecondition =
2530 Builder.CreateAnd(RetValNullabilityPrecondition,
2531 Builder.CreateIsNotNull(Arg.getAnyValue()));
2532 }
2533 }
2534 }
2535
2536 void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2537 CodeGenFunction *CGF) {
2538 if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2539 return;
2540 getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2541 }
2542
2543 void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
2544 CodeGenFunction *CGF) {
2545 if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2546 (!LangOpts.EmitAllDecls && !D->isUsed()))
2547 return;
2548 getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
2549 }
2550
2551 void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
2552 getOpenMPRuntime().processRequiresDirective(D);
2553 }
2554