//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::ClassScopeFunctionSpecialization:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum:      // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::MSGuid:    // __declspec(uuid("..."))
  case Decl::UnnamedGlobalConstant:
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using:          // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:      // typedef int X;
  case Decl::TypeAlias: {  // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still
  // need to be emitted like static variables, e.g. a function-scope
  // variable in the constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}
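
// Illustrative only (not part of the original file): the three dispatch paths
// above correspond to declarations like the following, assuming default
// language options.
//
//   void f() {
//     extern int a;      // external storage: emitted lazily on first use
//     static int b = 1;  // non-automatic duration: EmitStaticVarDecl
//     int c = 2;         // automatic duration: EmitAutoVarDecl
//   }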

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}
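
// Illustrative sketch (assumed example, not from the original source): in C
// mode the pretty name is "<context>.<var>", so given
//
//   void f(void) { static int x; }
//
// the static local is typically named "f.x" in the emitted module (the exact
// prefix depends on how the target mangles the enclosing function).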

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it.  If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one.  Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (D.hasFlexibleArrayInit(getContext()))
      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

#ifndef NDEBUG
  CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
                      D.getFlexibleArrayInitChars(getContext());
  CharUnits CstSize = CharUnits::fromQuantity(
      CGM.getDataLayout().getTypeAllocSize(Init->getType()));
  assert(VarSize == CstSize && "Emitted constant has unexpected size");
#endif

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer.  (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getValueType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(
        CGM.getModule(), Init->getType(), OldGV->isConstant(),
        OldGV->getLinkage(), Init, "",
        /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
        OldGV->getType()->getPointerAddressSpace());
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setConstant(CGM.isTypeConstant(D.getType(), true));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (D.needsDestruction(getContext()) == QualType::DK_cxx_destructor &&
      HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}
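
// Illustrative sketch of the type-rewrite case handled above (assumed
// example, not from the original source): unions lower to an LLVM type that
// covers only one representative member, so
//
//   union U { int i; float f; };
//   static union U u = {.f = 1.0f};   // C designated initializer
//
// may produce an initializer of type { float } for a global whose declared
// value type is { i32 }, forcing the global to be recreated as shown.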

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                      llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration.  This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  llvm::Type *elemTy = ConvertTypeForMem(D.getType());
  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
    cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such a variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV, uses of this constant will be invalid.
  llvm::Constant *castedAddr =
    llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobal(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

namespace {
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  template <class Derived>
  struct DestroyNRVOVariable : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
        : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

    llvm::Value *NRVOFlag;
    Address Loc;
    QualType Ty;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      static_cast<Derived *>(this)->emitDestructorCall(CGF);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }

    virtual ~DestroyNRVOVariable() = default;
  };

  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, QualType type,
                           const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
        : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
          Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc, Ty);
    }
  };

  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    bool isRedundantBeforeReturn() override { return true; }
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
      CGF.Builder.CreateCall(F, V);
    }
  };

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      // void f(void* arg);
      // __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
      (var.hasAttr<ObjCPreciseLifetimeAttr>()
       ? CodeGenFunction::destroyARCStrongPrecise
       : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. a missing decl or the condition of an if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}
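
// Illustrative (assumed examples, not from the original source): isAccessedBy
// returns true when the initializer mentions the variable itself, either
// directly or via a block capture, e.g.
//
//   __strong id x = makeWrapper(x);   // direct DeclRefExpr to 'x'
//   __strong id y = ^{ use(y); };     // block capturing 'y'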

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress(CGF);
      if (needsCast) {
        srcAddr = CGF.Builder.CreateElementBitCast(
            srcAddr, destLV.getAddress(CGF).getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}
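
// Illustrative sketch (assumed example, not from the original source): for a
// __weak-to-__weak initialization such as
//
//   __weak id w = strongObj;
//   __weak id v = w;   // l-value load of a __weak source
//
// the second init is emitted as objc_copyWeak on the two addresses rather
// than a load plus retain/store; an x-value source uses objc_moveWeak.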

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability(getContext());
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // Check if the right hand side of the assignment is nonnull, if the left
  // hand side must be nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized.  If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty =
        cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(*this), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means that we omit the retain, which causes non-autoreleased return
    // values to be released immediately.
    LLVM_FALLTHROUGH;
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this.  It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(*this), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier.  We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
        dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For inits for which canEmitInitWithFewStoresAfterBZero returned true, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder,
                                        bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder, IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder, IsAutoInit);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global.  It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy.  If it is large,
  // do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size?  Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}
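
// Illustrative (assumed example, not from the original source): a large,
// mostly-zero aggregate such as
//
//   int a[100] = {1};   // 400 bytes, one non-zero element
//
// exceeds the 32-byte limit and needs only a single scalar store after the
// bzero/memset, so this returns true; an all-zero initializer returns true
// regardless of size.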

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided
/// not to use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
///       memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}
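
// Illustrative (assumed example, not from the original source): an
// initializer whose bytes are all identical, e.g.
//
//   char buf[64] = { 'A', 'A', /* ...all 64 elements 'A'... */ };
//
// is recognized by llvm::isBytewiseValue, and the variable can then be
// initialized with a single memset of 0x41 instead of a memcpy from a global.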

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}
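
// Illustrative (assumed example, not from the original source): when
// optimizing, a small aggregate such as
//
//   struct P { int x, y; } p = {1, 2};   // 8 bytes <= 64-byte limit
//
// is emitted as two scalar stores instead of a memcpy from a constant
// global, which plays better with later store optimizations.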

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    uint64_t Size = ArrayTy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = ArrayTy->getElementType();
    bool ZeroInitializer = constant->isNullValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
    return llvm::ConstantArray::get(NewArrayTy, Values);
  }
  // FIXME: Add handling for tail padding in vectors. Vectors don't
  // have padding between or inside elements, but the total amount of
  // data can be less than the allocated size.
  return constant;
}
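
// Illustrative sketch (assumed example, not from the original source): on a
// typical ABI,
//
//   struct S { char c; int i; };   // 3 padding bytes between 'c' and 'i'
//
// is widened by constWithPadding to an anonymous struct that spells out the
// padding explicitly, e.g. { i8, [3 x i8], i32 }, with the [3 x i8] slice
// filled with zeroes or with the auto-init pattern bytes.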

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return std::string(getMangledName(FD));
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, CacheEntry->getValueType(), Align);
}

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  return Builder.CreateElementBitCast(SrcPtr, CGM.Int8Ty);
}

static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant, bool IsAutoInit) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    auto *I = Builder.CreateStore(constant, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
                                   SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Builder.CreateElementBitCast(Loc, Ty);
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
                                  IsAutoInit);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    auto *I = Builder.CreateMemSet(
        Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  // If the initializer is small, use a handful of stores.
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      // FIXME: handle the case when STy != Loc.getElementType().
      if (STy == Loc.getElementType()) {
        for (unsigned i = 0; i != constant->getNumOperands(); i++) {
          Address EltPtr = Builder.CreateStructGEP(Loc, i);
          emitStoresForConstant(
              CGM, D, EltPtr, isVolatile, Builder,
              cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
              IsAutoInit);
        }
        return;
      }
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      // FIXME: handle the case when ATy != Loc.getElementType().
      if (ATy == Loc.getElementType()) {
        for (unsigned i = 0; i != ATy->getNumElements(); i++) {
          Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
          emitStoresForConstant(
              CGM, D, EltPtr, isVolatile, Builder,
              cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
              IsAutoInit);
        }
        return;
      }
    }
  }

  // Copy from a global.
  auto *I =
      Builder.CreateMemCpy(Loc,
                           createUnnamedGlobalForMemcpyFrom(
                               CGM, D, Builder, constant, Loc.getAlignment()),
                           SizeVal, isVolatile);
  if (IsAutoInit)
    I->addAnnotationMetadata("auto-init");
}

static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                     Address Loc, bool isVolatile,
                                     CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
                                                llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::Value *SizeV = llvm::ConstantInt::get(
      Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}
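
// Illustrative (assumed textual IR, shown for opaque-pointer mode; the
// intrinsic suffix differs under typed pointers): the marker emitted above
// looks roughly like
//
//   call void @llvm.lifetime.start.p0(i64 %size, ptr %addr)
//
// where %size is the constant byte size (or -1 for scalable types), with a
// matching @llvm.lifetime.end.p0 emitted by EmitLifetimeEnd below.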

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}

void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension, store its QualType and the corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
  SmallVector<IdentifierInfo *, 4> VLAExprNames;

  // Break down the array into individual dimensions.
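  // For example (illustrative only), 'int a[n][m]' is walked one dimension at
  // a time below, yielding a size expression for 'n' and then one for 'm'.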
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Generate a locally unique name for the size expression.
      Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
      SmallString<12> Buffer;
      StringRef NameRef = Name.toStringRef(Buffer);
      auto &Ident = getContext().Idents.getOwn(NameRef);
      VLAExprNames.push_back(&Ident);
      auto SizeExprAddr =
          CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  unsigned NameIdx = 0;
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
      assert(cast<llvm::PointerType>(VlaSize.NumElts->getType())
                 ->isOpaqueOrPointeeTypeMatches(SizeTy) &&
             "Number of VLA elements must be SizeTy");
      auto QT = getContext().getIntTypeForBitwidth(
          SizeTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}

/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable.  Does not emit initialization or destruction.
CodeGenFunction::AutoVarEmission
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
  QualType Ty = D.getType();
  assert(
      Ty.getAddressSpace() == LangAS::Default ||
      (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));

  AutoVarEmission emission(D);

  bool isEscapingByRef = D.isEscapingByref();
  emission.IsEscapingByRef = isEscapingByRef;

  CharUnits alignment = getContext().getDeclAlign(&D);

  // If the type is variably-modified, emit all the VLA sizes for it.
  if (Ty->isVariablyModifiedType())
    EmitVariablyModifiedType(Ty);

  auto *DI = getDebugInfo();
  bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();

  Address address = Address::invalid();
  Address AllocaAddr = Address::invalid();
  Address OpenMPLocalAddr = Address::invalid();
  if (CGM.getLangOpts().OpenMPIRBuilder)
    OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
  else
    OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();

  bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();

  if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
    address = OpenMPLocalAddr;
    AllocaAddr = OpenMPLocalAddr;
  } else if (Ty->isConstantSizeType()) {
    // If this value is an array or struct with a statically determinable
    // constant initializer, there are optimizations we can do.
    //
    // TODO: We should constant-evaluate the initializer of any variable,
    // as long as it is initialized by a constant expression. Currently,
    // isConstantInitializer produces wrong answers for structs with
    // reference or bitfield members, and a few other cases, and checking
    // for POD-ness protects us from some of these.
    if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
        (D.isConstexpr() ||
         ((Ty.isPODType(getContext()) ||
           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
          D.getInit()->isConstantInitializer(getContext(), false)))) {

      // If the variable is of const type, is neither an NRVO candidate nor
      // a __block variable, and has no mutable members, emit it as a global
      // instead. The exception is a variable located in a non-constant
      // address space in OpenCL.
      if ((!getLangOpts().OpenCL ||
           Ty.getAddressSpace() == LangAS::opencl_constant) &&
          (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
           !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
        EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);

        // Signal this condition to later callbacks.
        emission.Addr = Address::invalid();
        assert(emission.wasEmittedAsGlobal());
        return emission;
      }

      // Otherwise, tell the initialization code that we're in this case.
      emission.IsConstantAggregate = true;
    }

    // A normal fixed sized variable becomes an alloca in the entry block,
    // unless:
    // - it's an NRVO variable.
    // - we are compiling OpenMP and it's an OpenMP local variable.
    if (NRVO) {
      // The named return value optimization: allocate this variable in the
      // return slot, so that we can elide the copy when returning this
      // variable (C++0x [class.copy]p34).
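      // For example (illustrative only):
      //   Foo make() { Foo x; ...; return x; }
      // Here 'x' is constructed directly in the return slot, so no copy or
      // move is needed when returning.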
      address = ReturnValue;
      AllocaAddr = ReturnValue;

      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
        const auto *RD = RecordTy->getDecl();
        const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
        if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
            RD->isNonTrivialToPrimitiveDestroy()) {
          // Create a flag that is used to indicate when the NRVO was applied
          // to this variable. Set it to zero to indicate that NRVO was not
          // applied.
          llvm::Value *Zero = Builder.getFalse();
          Address NRVOFlag =
              CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo",
                               /*ArraySize=*/nullptr, &AllocaAddr);
          EnsureInsertPoint();
          Builder.CreateStore(Zero, NRVOFlag);

          // Record the NRVO flag for this variable.
          NRVOFlags[&D] = NRVOFlag.getPointer();
          emission.NRVOFlag = NRVOFlag.getPointer();
        }
      }
    } else {
      CharUnits allocaAlignment;
      llvm::Type *allocaTy;
      if (isEscapingByRef) {
        auto &byrefInfo = getBlockByrefInfo(&D);
        allocaTy = byrefInfo.Type;
        allocaAlignment = byrefInfo.ByrefAlignment;
      } else {
        allocaTy = ConvertTypeForMem(Ty);
        allocaAlignment = alignment;
      }

      // Create the alloca.  Note that we set the name separately from
      // building the instruction so that it's there even in no-asserts
      // builds.
      address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
                                 /*ArraySize=*/nullptr, &AllocaAddr);

      // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
      // the catch parameter starts in the catchpad instruction, and we can't
      // insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's no point in doing
      // this if we don't have a valid insertion point.
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        // PR28267.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin as
        // soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again, this
        // is rare.
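        // For example (illustrative only), in C:
        //   { l: ; int x; ...; goto l; }
        // The lifetime of 'x' begins when the block is entered, so the
        // backward jump to 'l' stays inside x's lifetime.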
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(Size, AllocaAddr.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      Address Stack =
        CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");

      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      // FIXME: in general circumstances, this should be an EH cleanup.
      pushStackRestore(NormalCleanup, Stack);
    }

    auto VlaSize = getVLASize(Ty);
    llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

    // Allocate memory for the array.
    address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
                               &AllocaAddr);

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }

  setAddrOfLocalVar(&D, address);
  emission.Addr = address;
  emission.AllocaAddr = AllocaAddr;

  // Emit debug info for local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    Address DebugAddr = address;
    bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
    DI->setLocation(D.getLocation());

    // If NRVO, use a pointer to the return address.
    if (UsePointerValue) {
      DebugAddr = ReturnValuePointer;
      AllocaAddr = ReturnValuePointer;
    }
    (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
                                        UsePointerValue);
  }

  if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
    EmitVarAnnotations(&D, address.getPointer());

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                         emission.getOriginalAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());

  return emission;
}

static bool isCapturedBy(const VarDecl &, const Expr *);

/// Determines whether the given __block variable is potentially
/// captured by the given statement.
static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
  if (const Expr *E = dyn_cast<Expr>(S))
    return isCapturedBy(Var, E);
  for (const Stmt *SubStmt : S->children())
    if (isCapturedBy(Var, SubStmt))
      return true;
  return false;
}

/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
  // Skip the most common kinds of expressions that make
  // hierarchy-walking expensive.
  E = E->IgnoreParenCasts();

  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
    const BlockDecl *Block = BE->getBlockDecl();
    for (const auto &I : Block->captures()) {
      if (I.getVariable() == &Var)
        return true;
    }

    // No need to walk into the subexpressions.
    return false;
  }

  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
    const CompoundStmt *CS = SE->getSubStmt();
    for (const auto *BI : CS->body()) {
      if (const auto *BIE = dyn_cast<Expr>(BI)) {
        if (isCapturedBy(Var, BIE))
          return true;
      } else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
        // Special-case declarations: check each variable's initializer.
        for (const auto *I : DS->decls()) {
          if (const auto *VD = dyn_cast<VarDecl>(I)) {
            const Expr *Init = VD->getInit();
            if (Init && isCapturedBy(Var, Init))
              return true;
          }
        }
      } else {
        // FIXME: Conservatively assume that arbitrary statements cause
        // capturing. Later, provide code to poke into statements for capture
        // analysis.
        return true;
      }
    }
    return false;
  }

  for (const Stmt *SubStmt : E->children())
    if (isCapturedBy(Var, SubStmt))
      return true;

  return false;
}

/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
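/// For example (illustrative only), 'Foo f;' is trivial when Foo's default
/// constructor is trivial and no zero-initialization is required.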
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
  if (!Init)
    return true;

  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
      if (Constructor->isTrivial() &&
          Constructor->isDefaultConstructor() &&
          !Construct->requiresZeroInitialization())
        return true;

  return false;
}

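/// Emit the stores required by -ftrivial-auto-var-init for a variable of the
/// given type at \p Loc. Fixed-size types get constant zero or pattern
/// stores; VLAs, whose size is unknown at compile time, get a runtime memset
/// or an initialization loop instead.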
void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
                                                      const VarDecl &D,
                                                      Address Loc) {
  auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
  CharUnits Size = getContext().getTypeSizeInChars(type);
  bool isVolatile = type.isVolatileQualified();
  if (!Size.isZero()) {
    switch (trivialAutoVarInit) {
    case LangOptions::TrivialAutoVarInitKind::Uninitialized:
      llvm_unreachable("Uninitialized handled by caller");
    case LangOptions::TrivialAutoVarInitKind::Zero:
      if (CGM.stopAutoInit())
        return;
      emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
      break;
    case LangOptions::TrivialAutoVarInitKind::Pattern:
      if (CGM.stopAutoInit())
        return;
      emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
      break;
    }
    return;
  }

  // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
  // them, so emit a memcpy with the VLA size to initialize each element.
  // Technically, zero-sized or negative-sized VLAs are undefined, and UBSan
  // will catch such code, but code that creates zero-sized VLAs does exist.
  // Be nice and initialize whatever they requested.
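  // For example (illustrative only), 'int vla[n];' reaches this path because
  // 'n' is not a compile-time constant, so the size computed above is zero
  // and the initialization must be emitted as a runtime loop below.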
  const auto *VlaType = getContext().getAsVariableArrayType(type);
  if (!VlaType)
    return;
  auto VlaSize = getVLASize(VlaType);
  auto SizeVal = VlaSize.NumElts;
  CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
  switch (trivialAutoVarInit) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    llvm_unreachable("Uninitialized handled by caller");

  case LangOptions::TrivialAutoVarInitKind::Zero: {
    if (CGM.stopAutoInit())
      return;
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
                                   SizeVal, isVolatile);
    I->addAnnotationMetadata("auto-init");
    break;
  }

  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    if (CGM.stopAutoInit())
      return;
    llvm::Type *ElTy = Loc.getElementType();
    llvm::Constant *Constant = constWithPadding(
        CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
    CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
    llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
    llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
    llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
    llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
        SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
        "vla.iszerosized");
    Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
    EmitBlock(SetupBB);
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    llvm::Value *BaseSizeInChars =
        llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
    Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
    llvm::Value *End = Builder.CreateInBoundsGEP(
        Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
    llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
    EmitBlock(LoopBB);
    llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
    Cur->addIncoming(Begin.getPointer(), OriginBB);
    CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
    auto *I =
        Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
                             createUnnamedGlobalForMemcpyFrom(
                                 CGM, D, Builder, Constant, ConstantAlign),
                             BaseSizeInChars, isVolatile);
    I->addAnnotationMetadata("auto-init");
    llvm::Value *Next =
        Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
    llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
    Builder.CreateCondBr(Done, ContBB, LoopBB);
    Cur->addIncoming(Next, LoopBB);
    EmitBlock(ContBB);
  } break;
  }
}

void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsEscapingByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have an initializer and it is
  // a C struct that is non-trivial to initialize or an array containing such
  // a struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsEscapingByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer.  If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit =
      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);

  bool locIsByrefHeader = !capturedByInit;
  const Address Loc =
      locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;

  // Note: constexpr already initializes everything correctly.
  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
      (D.isConstexpr()
           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
           : (D.getAttr<UninitializedAttr>()
                  ? LangOptions::TrivialAutoVarInitKind::Uninitialized
                  : getContext().getLangOpts().getTrivialAutoVarInit()));

  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
    if (trivialAutoVarInit ==
        LangOptions::TrivialAutoVarInitKind::Uninitialized)
      return;

    // Only initialize a __block's storage: we always initialize the header.
    if (emission.IsEscapingByRef && !locIsByrefHeader)
      Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);

    return emitZeroOrPatternForAutoVarInit(type, D, Loc);
  };

  if (isTrivialInitializer(Init))
    return initializeWhatIsTechnicallyUninitialized(Loc);

  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate ||
      D.mightBeUsableInConstantExpressions(getContext())) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
    if (constant && !constant->isZeroValue() &&
        (trivialAutoVarInit !=
         LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
      IsPattern isPattern =
          (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
              ? IsPattern::Yes
              : IsPattern::No;
      // C guarantees that brace-init with fewer initializers than members in
      // the aggregate will initialize the rest of the aggregate as-if it were
      // static initialization. In turn static initialization guarantees that
      // padding is initialized to zero bits. We could instead pattern-init if D
      // has any ImplicitValueInitExpr, but that seems to be unintuitive
      // behavior.
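      // For example (illustrative only), in C:
      //   struct S { int a, b; } s = { 1 };
      // 's.b' (and any padding) is zero-initialized as-if by static
      // initialization.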
      constant = constWithPadding(CGM, IsPattern::No,
                                  replaceUndef(CGM, isPattern, constant));
    }
  }

  if (!constant) {
    initializeWhatIsTechnicallyUninitialized(Loc);
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
  }

  emitStoresForConstant(CGM, D, Builder.CreateElementBitCast(Loc, CGM.Int8Ty),
                        type.isVolatileQualified(), Builder, constant,
                        /*IsAutoInit=*/false);
}

/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location.  The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
/// \param lvalue the lvalue to initialize
/// \param capturedByInit true if \p D is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, true);
    return;
  }
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr*>(init), lvalue);
    } else {
      AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
      if (isa<VarDecl>(D))
        Overlap = AggValueSlot::DoesNotOverlap;
      else if (auto *FD = dyn_cast<FieldDecl>(D))
        Overlap = getOverlapForFieldInit(FD);
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init, AggValueSlot::forLValue(
                            lvalue, *this, AggValueSlot::IsDestructed,
                            AggValueSlot::DoesNotNeedGCBarriers,
                            AggValueSlot::IsNotAliased, Overlap));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
                            const CodeGenFunction::AutoVarEmission &emission,
                            QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
                                                  emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    break;

  case QualType::DK_nontrivial_c_struct:
    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
                                                emission.NRVOFlag, type);
      return;
    }
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}

void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done.  Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
  // mode.
  if (emission.IsEscapingByRef &&
      CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
    BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
    if (emission.Variable->getType().isObjCGCWeak())
      Flags |= BLOCK_FIELD_IS_WEAK;
    enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
                      /*LoadBlockVarAddr*/ false,
                      cxxDestructorCanThrow(emission.Variable->getType()));
  }
}

CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  case QualType::DK_nontrivial_c_struct:
    return destroyNonTrivialCStruct;
  }
  llvm_unreachable("Unknown DestructionKind");
}

/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}

/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                  Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
              cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                  QualType type, Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
                                     destroyer, useEHCleanupForArray);
}

void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}

void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
                                                  Address addr, QualType type,
                                                  Destroyer *destroyer,
                                                  bool useEHCleanupForArray) {
  // If we're not in a conditional branch, we don't need to bother generating a
  // conditional cleanup.
  if (!isInConditionalBranch()) {
    // Push an EH-only cleanup for the object now.
    // FIXME: When popping normal cleanups, we need to keep this EH cleanup
    // around in case a temporary's destructor throws an exception.
    if (cleanupKind & EHCleanup)
      EHStack.pushCleanup<DestroyObject>(
          static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
          destroyer, useEHCleanupForArray);

    return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
        cleanupKind, Address::invalid(), addr, type, destroyer,
        useEHCleanupForArray);
  }

  // Otherwise, we should only destroy the object if it's been initialized.
  // Re-use the active flag and saved address across both the EH and end of
  // scope cleanups.

  using SavedType = typename DominatingValue<Address>::saved_type;
  using ConditionalCleanupType =
      EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
                                       Destroyer *, bool>;

  Address ActiveFlag = createCleanupActiveFlag();
  SavedType SavedAddr = saveValueInCond(addr);

  if (cleanupKind & EHCleanup) {
    EHStack.pushCleanup<ConditionalCleanupType>(
        static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr, type,
        destroyer, useEHCleanupForArray);
    initFullExprCleanupWithFlag(ActiveFlag);
  }

  pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
      cleanupKind, ActiveFlag, SavedAddr, type, destroyer,
      useEHCleanupForArray);
}

/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
///   objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
///   elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
///   used when destroying array elements, in case one of the
///   destructions throws an exception
void CodeGenFunction::emitDestroy(Address addr, QualType type,
                                  Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  const ArrayType *arrayType = getContext().getAsArrayType(type);
  if (!arrayType)
    return destroyer(*this, addr, type);

  llvm::Value *length = emitArrayLength(arrayType, type, addr);

  CharUnits elementAlign =
    addr.getAlignment()
        .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));

  // Normally we have to check whether the array is zero-length.
  bool checkZeroLength = true;

  // But if the array length is constant, we can suppress that.
  if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
    // ...and if it's constant zero, we can just skip the entire thing.
    if (constLength->isZero()) return;
    checkZeroLength = false;
  }

  llvm::Value *begin = addr.getPointer();
  llvm::Value *end =
      Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
  emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                   checkZeroLength, useEHCleanupForArray);
}

/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first.  The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
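  // Schematically (illustrative only), the emitted IR looks like:
  //   arraydestroy.body:
  //     elementPast = phi [ end, entry ], [ element, body ]
  //     element = elementPast - 1
  //     destroy(element)
  //     br (element == begin) ? done : body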
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  llvm::PHINode *elementPast =
    Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
  llvm::Value *element = Builder.CreateInBoundsGEP(
      llvmElementType, elementPast, negativeOne, "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, llvmElementType, elementAlign),
            elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}

/// Perform partial array destruction as if in an EH cleanup.  Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  llvm::Type *elemTy = CGF.ConvertTypeForMem(type);

  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
    begin = CGF.Builder.CreateInBoundsGEP(
        elemTy, begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(
        elemTy, end, gepIndices, "pad.arrayend");
  }

  // Destroy the array.  We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}

namespace {
  /// RegularPartialArrayDestroy - a cleanup which performs a partial
  /// array destroy where the end pointer is regularly determined and
  /// does not need to be loaded from a local.
  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    llvm::Value *ArrayEnd;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
                               QualType elementType, CharUnits elementAlign,
                               CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };

  /// IrregularPartialArrayDestroy - a cleanup which performs a
  /// partial array destroy where the end pointer is irregularly
  /// determined and must be loaded from a local.
  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    Address ArrayEndPointer;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
                                 Address arrayEndPointer,
                                 QualType elementType,
                                 CharUnits elementAlign,
                                 CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };
} // end anonymous namespace

/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array.  The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
                                                    arrayBegin, arrayEndPointer,
                                                    elementType, elementAlign,
                                                    destroyer);
}

/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array.  The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
                                                  arrayBegin, arrayEnd,
                                                  elementType, elementAlign,
                                                  destroyer);
}

/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
  if (LifetimeStartFn)
    return LifetimeStartFn;
  LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
    llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
  return LifetimeStartFn;
}

/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
  if (LifetimeEndFn)
    return LifetimeEndFn;
  LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
    llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
  return LifetimeEndFn;
}

namespace {
  /// A cleanup to perform a release of an object at the end of a
  /// function.  This is used to balance out the incoming +1 of a
  /// ns_consumed argument when we can't reasonably do that just by
  /// not doing the initial retain for a __block argument.
  struct ConsumeARCParameter final : EHScopeStack::Cleanup {
    ConsumeARCParameter(llvm::Value *param,
                        ARCPreciseLifetime_t precise)
      : Param(param), Precise(precise) {}

    llvm::Value *Param;
    ARCPreciseLifetime_t Precise;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitARCRelease(Param, Precise);
    }
  };
} // end anonymous namespace

/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  bool NoDebugInfo = false;
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // This may be passed as an inalloca'ed value on Windows x86.
    if (BlockInfo) {
      llvm::Value *V = Arg.isIndirect()
                           ? Builder.CreateLoad(Arg.getIndirectAddress())
                           : Arg.getDirectValue();
      setBlockContextParameter(IPD, ArgNo, V);
      return;
    }
    // Suppress debug info for ThreadPrivateVar parameters; otherwise it hides
    // the debug info of the TLS variables.
    NoDebugInfo =
        (IPD->getParameterKind() == ImplicitParamDecl::ThreadPrivateVar);
  }

  Address DeclPtr = Address::invalid();
  Address AllocaPtr = Address::invalid();
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    // If we have a prettier pointer type at this point, bitcast to that.
    DeclPtr = Arg.getIndirectAddress();
    DeclPtr = Builder.CreateElementBitCast(DeclPtr, ConvertTypeForMem(Ty),
                                           D.getName());
    // Indirect argument is in alloca address space, which may be different
    // from the default address space.
    auto AllocaAS = CGM.getASTAllocaAddressSpace();
    auto *V = DeclPtr.getPointer();
    AllocaPtr = DeclPtr;
    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
    auto DestLangAS =
        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
    if (SrcLangAS != DestLangAS) {
      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
             CGM.getDataLayout().getAllocaAddrSpace());
      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
      auto *T = DeclPtr.getElementType()->getPointerTo(DestAS);
      DeclPtr = DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
          *this, V, SrcLangAS, DestLangAS, T, true));
    }

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (Ty->isRecordType() && !CurFuncIsThunk &&
        Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
      if (QualType::DestructionKind DtorKind =
              D.needsDestruction(getContext())) {
        assert((DtorKind == QualType::DK_cxx_destructor ||
                DtorKind == QualType::DK_nontrivial_c_struct) &&
               "unexpected destructor type");
        pushDestroy(DtorKind, DeclPtr, Ty);
        CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
            EHStack.stable_begin();
      }
    }
  } else {
    // Check if the parameter address is controlled by OpenMP runtime.
    Address OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();
    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
      DeclPtr = OpenMPLocalAddr;
      AllocaPtr = DeclPtr;
    } else {
      // Otherwise, create a temporary to hold the value.
      DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                              D.getName() + ".addr", &AllocaPtr);
    }
    DoStore = true;
  }

  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
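      // For example (illustrative only):
      //   void consume(__attribute__((ns_consumed)) id obj);
      // The caller passes 'obj' at +1, and this function must release it.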
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // If a parameter is pseudo-strong then we can omit the implicit retain.
      if (D.isARCPseudoStrong()) {
        assert(lt == Qualifiers::OCL_Strong &&
               "pseudo-strong variable isn't strong?");
        assert(qs.hasConst() && "pseudo-strong variable should be const!");
        lt = Qualifiers::OCL_ExplicitNone;
      }

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // Use objc_storeStrong(&dest, value) to retain the object. But
            // first, store a null into 'dest' because objc_storeStrong
            // attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
            DoStore = false;
          } else {
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
          }
2570         }
2571       } else {
2572         // Push the cleanup for a consumed parameter.
2573         if (isConsumed) {
2574           ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2575                                 ? ARCPreciseLifetime : ARCImpreciseLifetime);
2576           EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2577                                                    precise);
2578         }
2579 
2580         if (lt == Qualifiers::OCL_Weak) {
2581           EmitARCInitWeak(DeclPtr, ArgVal);
2582           DoStore = false; // The weak init is a store, no need to do two.
2583         }
2584       }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  setAddrOfLocalVar(&D, DeclPtr);

  // Emit debug info for param declarations in non-thunk functions.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
        !NoDebugInfo) {
      llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
          &D, AllocaPtr.getPointer(), ArgNo, Builder);
      if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
        DI->getParamDbgMappings().insert({Var, DILocalVar});
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.getPointer());

  // We can only check the return value's nullability if every argument to the
  // function satisfies its own nullability precondition. This makes it
  // necessary to emit null checks for the arguments in the function body
  // itself.
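  // For example, given 'int *_Nonnull f(int *_Nonnull p)', checking that f's
  // return value is non-null is only meaningful if 'p' was in fact non-null
  // on entry, so that precondition is accumulated here.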
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}

void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}

void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
                                         CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
      (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
}

void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().processRequiresDirective(D);
}

void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
  for (const Expr *E : D->varlists()) {
    const auto *DE = cast<DeclRefExpr>(E);
    const auto *VD = cast<VarDecl>(DE->getDecl());

    // Skip all but globals.
    if (!VD->hasGlobalStorage())
      continue;

    // Check if the global has been materialized yet. If not, we are done, as
    // any later emission will honor the OMPAllocateDeclAttr. However, if we
    // have already emitted the global, we might have done so before the
    // OMPAllocateDeclAttr was attached, potentially leaving it in the wrong
    // address space. While not pretty, common practice is to remove the old
    // IR global and generate a new one, so we do that here too. Uses are
    // replaced properly.
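    // An illustrative case:
    //   int Global;
    //   #pragma omp allocate(Global) allocator(omp_pteam_mem_alloc)
    // where 'Global' may already have been emitted in the default address
    // space before the allocator information was seen.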
    StringRef MangledName = getMangledName(VD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;

    // We can also keep the existing global if its address space is what we
    // expect it to be; if not, it is replaced.
    QualType ASTTy = VD->getType();
    clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
    auto TargetAS = getContext().getTargetAddressSpace(GVAS);
    if (Entry->getType()->getAddressSpace() == TargetAS)
      continue;

    // Make a new global with the correct type / address space.
    llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
    llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);

    // Replace all uses of the old global with a cast. Since we mutate the
    // type in place, we need an intermediate that takes the spot of the old
    // entry until we can create the cast.
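    // In short: RAUW(Entry -> DummyGV), mutate Entry's type in place, then
    // RAUW(DummyGV -> cast-of-Entry) and delete the dummy.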
    llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
        getModule(), Entry->getValueType(), false,
        llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
        llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
    Entry->replaceAllUsesWith(DummyGV);

    Entry->mutateType(PTy);
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            Entry, DummyGV->getType());

    // Now that we have a cast version of the changed global, the dummy can be
    // replaced and deleted.
    DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
    DummyGV->eraseFromParent();
  }
}

llvm::Optional<CharUnits>
CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
  if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
    if (Expr *Alignment = AA->getAlignment()) {
      unsigned UserAlign =
          Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
      CharUnits NaturalAlign =
          getNaturalTypeAlignment(VD->getType().getNonReferenceType());

      // OpenMP 5.1, page 185, lines 7-10:
      //   Each item in the align modifier list must be aligned to the maximum
      //   of the specified alignment and the type's natural alignment.
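      // E.g., 'align(16)' on an 'int' (natural alignment 4) yields 16, while
      // 'align(2)' on a 'double' (natural alignment 8) yields 8.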
      return CharUnits::fromQuantity(
          std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
    }
  }
  return llvm::None;
}