1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 //  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 //  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
38 
39 #include <optional>
40 
41 using namespace clang;
42 using namespace CodeGen;
43 
44 namespace {
45 class ItaniumCXXABI : public CodeGen::CGCXXABI {
46   /// VTables - All the vtables which have been defined.
47   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
48 
49   /// All the thread wrapper functions that have been used.
50   llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
51       ThreadWrappers;
52 
53 protected:
54   bool UseARMMethodPtrABI;
55   bool UseARMGuardVarABI;
56   bool Use32BitVTableOffsetABI;
57 
58   ItaniumMangleContext &getMangleContext() {
59     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
60   }
61 
62 public:
63   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
64                 bool UseARMMethodPtrABI = false,
65                 bool UseARMGuardVarABI = false) :
66     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
67     UseARMGuardVarABI(UseARMGuardVarABI),
68     Use32BitVTableOffsetABI(false) { }
69 
70   bool classifyReturnType(CGFunctionInfo &FI) const override;
71 
72   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
73     // If C++ prohibits us from making a copy, pass by address.
74     if (!RD->canPassInRegisters())
75       return RAA_Indirect;
76     return RAA_Default;
77   }
78 
79   bool isThisCompleteObject(GlobalDecl GD) const override {
80     // The Itanium ABI has separate complete-object vs.  base-object
81     // variants of both constructors and destructors.
82     if (isa<CXXDestructorDecl>(GD.getDecl())) {
83       switch (GD.getDtorType()) {
84       case Dtor_Complete:
85       case Dtor_Deleting:
86         return true;
87 
88       case Dtor_Base:
89         return false;
90 
91       case Dtor_Comdat:
92         llvm_unreachable("emitting dtor comdat as function?");
93       }
94       llvm_unreachable("bad dtor kind");
95     }
96     if (isa<CXXConstructorDecl>(GD.getDecl())) {
97       switch (GD.getCtorType()) {
98       case Ctor_Complete:
99         return true;
100 
101       case Ctor_Base:
102         return false;
103 
104       case Ctor_CopyingClosure:
105       case Ctor_DefaultClosure:
106         llvm_unreachable("closure ctors in Itanium ABI?");
107 
108       case Ctor_Comdat:
109         llvm_unreachable("emitting ctor comdat as function?");
110       }
111       llvm_unreachable("bad dtor kind");
112     }
113 
114     // No other kinds.
115     return false;
116   }
117 
118   bool isZeroInitializable(const MemberPointerType *MPT) override;
119 
120   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
121 
122   CGCallee
123     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
124                                     const Expr *E,
125                                     Address This,
126                                     llvm::Value *&ThisPtrForCall,
127                                     llvm::Value *MemFnPtr,
128                                     const MemberPointerType *MPT) override;
129 
130   llvm::Value *
131     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
132                                  Address Base,
133                                  llvm::Value *MemPtr,
134                                  const MemberPointerType *MPT) override;
135 
136   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
137                                            const CastExpr *E,
138                                            llvm::Value *Src) override;
139   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
140                                               llvm::Constant *Src) override;
141 
142   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
143 
144   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
145   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
146                                         CharUnits offset) override;
147   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
148   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
149                                      CharUnits ThisAdjustment);
150 
151   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
152                                            llvm::Value *L, llvm::Value *R,
153                                            const MemberPointerType *MPT,
154                                            bool Inequality) override;
155 
156   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
157                                          llvm::Value *Addr,
158                                          const MemberPointerType *MPT) override;
159 
160   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
161                                Address Ptr, QualType ElementType,
162                                const CXXDestructorDecl *Dtor) override;
163 
164   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
165   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
166 
167   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
168 
169   llvm::CallInst *
170   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
171                                       llvm::Value *Exn) override;
172 
173   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
174   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
175   CatchTypeInfo
176   getAddrOfCXXCatchHandlerType(QualType Ty,
177                                QualType CatchHandlerType) override {
178     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
179   }
180 
181   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
182   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
183   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
184                           Address ThisPtr,
185                           llvm::Type *StdTypeInfoPtrTy) override;
186 
187   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
188                                           QualType SrcRecordTy) override;
189 
190   /// Determine whether we know that all instances of type RecordTy will have
191   /// the same vtable pointer values, that is distinct from all other vtable
192   /// pointers. While this is required by the Itanium ABI, it doesn't happen in
193   /// practice in some cases due to language extensions.
194   bool hasUniqueVTablePointer(QualType RecordTy) {
195     const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
196 
197     // Under -fapple-kext, multiple definitions of the same vtable may be
198     // emitted.
199     if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
200         getContext().getLangOpts().AppleKext)
201       return false;
202 
203     // If the type_info* would be null, the vtable might be merged with that of
204     // another type.
205     if (!CGM.shouldEmitRTTI())
206       return false;
207 
208     // If there's only one definition of the vtable in the program, it has a
209     // unique address.
210     if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
211       return true;
212 
213     // Even if there are multiple definitions of the vtable, they are required
214     // by the ABI to use the same symbol name, so should be merged at load
215     // time. However, if the class has hidden visibility, there can be
216     // different versions of the class in different modules, and the ABI
217     // library might treat them as being the same.
218     if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
219         llvm::GlobalValue::DefaultVisibility)
220       return false;
221 
222     return true;
223   }
224 
225   bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
226     return hasUniqueVTablePointer(DestRecordTy);
227   }
228 
229   llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
230                                    QualType SrcRecordTy, QualType DestTy,
231                                    QualType DestRecordTy,
232                                    llvm::BasicBlock *CastEnd) override;
233 
234   llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
235                                     QualType SrcRecordTy, QualType DestTy,
236                                     QualType DestRecordTy,
237                                     llvm::BasicBlock *CastSuccess,
238                                     llvm::BasicBlock *CastFail) override;
239 
240   llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
241                                      QualType SrcRecordTy) override;
242 
243   bool EmitBadCastCall(CodeGenFunction &CGF) override;
244 
245   llvm::Value *
246     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
247                               const CXXRecordDecl *ClassDecl,
248                               const CXXRecordDecl *BaseClassDecl) override;
249 
250   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
251 
252   AddedStructorArgCounts
253   buildStructorSignature(GlobalDecl GD,
254                          SmallVectorImpl<CanQualType> &ArgTys) override;
255 
256   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
257                               CXXDtorType DT) const override {
258     // Itanium does not emit any destructor variant as an inline thunk.
259     // Delegating may occur as an optimization, but all variants are either
260     // emitted with external linkage or as linkonce if they are inline and used.
261     return false;
262   }
263 
264   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
265 
266   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
267                                  FunctionArgList &Params) override;
268 
269   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
270 
271   AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
272                                                const CXXConstructorDecl *D,
273                                                CXXCtorType Type,
274                                                bool ForVirtualBase,
275                                                bool Delegating) override;
276 
277   llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
278                                              const CXXDestructorDecl *DD,
279                                              CXXDtorType Type,
280                                              bool ForVirtualBase,
281                                              bool Delegating) override;
282 
283   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
284                           CXXDtorType Type, bool ForVirtualBase,
285                           bool Delegating, Address This,
286                           QualType ThisTy) override;
287 
288   void emitVTableDefinitions(CodeGenVTables &CGVT,
289                              const CXXRecordDecl *RD) override;
290 
291   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
292                                            CodeGenFunction::VPtr Vptr) override;
293 
294   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
295     return true;
296   }
297 
298   llvm::Constant *
299   getVTableAddressPoint(BaseSubobject Base,
300                         const CXXRecordDecl *VTableClass) override;
301 
302   llvm::Value *getVTableAddressPointInStructor(
303       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
304       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
305 
306   llvm::Value *getVTableAddressPointInStructorWithVTT(
307       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
308       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
309 
310   llvm::Constant *
311   getVTableAddressPointForConstExpr(BaseSubobject Base,
312                                     const CXXRecordDecl *VTableClass) override;
313 
314   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
315                                         CharUnits VPtrOffset) override;
316 
317   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
318                                      Address This, llvm::Type *Ty,
319                                      SourceLocation Loc) override;
320 
321   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
322                                          const CXXDestructorDecl *Dtor,
323                                          CXXDtorType DtorType, Address This,
324                                          DeleteOrMemberCallExpr E) override;
325 
326   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
327 
328   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
329   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
330 
331   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
332                        bool ReturnAdjustment) override {
333     // Allow inlining of thunks by emitting them with available_externally
334     // linkage together with vtables when needed.
335     if (ForVTable && !Thunk->hasLocalLinkage())
336       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
337     CGM.setGVProperties(Thunk, GD);
338   }
339 
340   bool exportThunk() override { return true; }
341 
342   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
343                                      const ThisAdjustment &TA) override;
344 
345   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
346                                        const ReturnAdjustment &RA) override;
347 
348   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
349                               FunctionArgList &Args) const override {
350     assert(!Args.empty() && "expected the arglist to not be empty!");
351     return Args.size() - 1;
352   }
353 
354   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
355   StringRef GetDeletedVirtualCallName() override
356     { return "__cxa_deleted_virtual"; }
357 
358   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
359   Address InitializeArrayCookie(CodeGenFunction &CGF,
360                                 Address NewPtr,
361                                 llvm::Value *NumElements,
362                                 const CXXNewExpr *expr,
363                                 QualType ElementType) override;
364   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
365                                    Address allocPtr,
366                                    CharUnits cookieSize) override;
367 
368   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
369                        llvm::GlobalVariable *DeclPtr,
370                        bool PerformInit) override;
371   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
372                           llvm::FunctionCallee dtor,
373                           llvm::Constant *addr) override;
374 
375   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
376                                                 llvm::Value *Val);
377   void EmitThreadLocalInitFuncs(
378       CodeGenModule &CGM,
379       ArrayRef<const VarDecl *> CXXThreadLocals,
380       ArrayRef<llvm::Function *> CXXThreadLocalInits,
381       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
382 
383   bool usesThreadWrapperFunction(const VarDecl *VD) const override {
384     return !isEmittedWithConstantInitializer(VD) ||
385            mayNeedDestruction(VD);
386   }
387   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
388                                       QualType LValType) override;
389 
390   bool NeedsVTTParameter(GlobalDecl GD) override;
391 
392   /**************************** RTTI Uniqueness ******************************/
393 
394 protected:
395   /// Returns true if the ABI requires RTTI type_info objects to be unique
396   /// across a program.
397   virtual bool shouldRTTIBeUnique() const { return true; }
398 
399 public:
400   /// What sort of unique-RTTI behavior should we use?
401   enum RTTIUniquenessKind {
402     /// We are guaranteeing, or need to guarantee, that the RTTI string
403     /// is unique.
404     RUK_Unique,
405 
406     /// We are not guaranteeing uniqueness for the RTTI string, so we
407     /// can demote to hidden visibility but must use string comparisons.
408     RUK_NonUniqueHidden,
409 
410     /// We are not guaranteeing uniqueness for the RTTI string, so we
411     /// have to use string comparisons, but we also have to emit it with
412     /// non-hidden visibility.
413     RUK_NonUniqueVisible
414   };
415 
416   /// Return the required visibility status for the given type and linkage in
417   /// the current ABI.
418   RTTIUniquenessKind
419   classifyRTTIUniqueness(QualType CanTy,
420                          llvm::GlobalValue::LinkageTypes Linkage) const;
421   friend class ItaniumRTTIBuilder;
422 
423   void emitCXXStructor(GlobalDecl GD) override;
424 
425   std::pair<llvm::Value *, const CXXRecordDecl *>
426   LoadVTablePtr(CodeGenFunction &CGF, Address This,
427                 const CXXRecordDecl *RD) override;
428 
429  private:
430    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
431      const auto &VtableLayout =
432          CGM.getItaniumVTableContext().getVTableLayout(RD);
433 
434      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
435        // Skip empty slot.
436        if (!VtableComponent.isUsedFunctionPointerKind())
437          continue;
438 
439        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
440        if (!Method->getCanonicalDecl()->isInlined())
441          continue;
442 
443        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
444        auto *Entry = CGM.GetGlobalValue(Name);
445        // This checks if virtual inline function has already been emitted.
446        // Note that it is possible that this inline function would be emitted
447        // after trying to emit vtable speculatively. Because of this we do
448        // an extra pass after emitting all deferred vtables to find and emit
449        // these vtables opportunistically.
450        if (!Entry || Entry->isDeclaration())
451          return true;
452      }
453      return false;
454   }
455 
456   bool isVTableHidden(const CXXRecordDecl *RD) const {
457     const auto &VtableLayout =
458             CGM.getItaniumVTableContext().getVTableLayout(RD);
459 
460     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
461       if (VtableComponent.isRTTIKind()) {
462         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
463         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
464           return true;
465       } else if (VtableComponent.isUsedFunctionPointerKind()) {
466         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
467         if (Method->getVisibility() == Visibility::HiddenVisibility &&
468             !Method->isDefined())
469           return true;
470       }
471     }
472     return false;
473   }
474 };
475 
/// ABI variant used for 32-bit ARM targets (and, per CreateItaniumCXXABI,
/// iOS and watchOS).  Enables the ARM member-pointer and guard-variable
/// encodings, makes constructors/destructors return 'this', and overrides
/// the array-cookie layout.
class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  bool constructorsAndDestructorsReturnThis() const override { return true; }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  // ARM-specific array cookie handling.
  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};
496 
/// ABI variant for Apple's 64-bit ARM targets.  Builds on the ARM ABI but
/// restricts virtual member-function-pointer offsets to 32 bits and drops
/// the unique-RTTI guarantee.
class AppleARM64CXXABI : public ARMCXXABI {
public:
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    // Only the low 32 bits of a virtual function-pointer offset are used.
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};
506 
/// Fuchsia follows the generic Itanium ABI except that constructors and
/// destructors return 'this', as in the ARM ABI.
class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
};
515 
/// WebAssembly uses the ARM member-pointer and guard-variable encodings,
/// makes constructors/destructors return 'this', and supplies its own
/// exception-handling entry points for catch and unexpected-exception
/// termination.
class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
  // NOTE(review): presumably because Wasm indirect calls validate the callee's
  // signature, so mismatched-type calls cannot be tolerated — confirm.
  bool canCallMismatchedFunctionType() const override { return false; }
};
530 
/// ABI variant for IBM XL (AIX).  Uses sinit/sterm-style functions for
/// global initialization/termination rather than the default registration
/// scheme, with a custom global-destructor registration path.
class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  // Builds the sterm finalizer that invokes 'dtorStub' for variable D.
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
546 }
547 
548 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
549   switch (CGM.getContext().getCXXABIKind()) {
550   // For IR-generation purposes, there's no significant difference
551   // between the ARM and iOS ABIs.
552   case TargetCXXABI::GenericARM:
553   case TargetCXXABI::iOS:
554   case TargetCXXABI::WatchOS:
555     return new ARMCXXABI(CGM);
556 
557   case TargetCXXABI::AppleARM64:
558     return new AppleARM64CXXABI(CGM);
559 
560   case TargetCXXABI::Fuchsia:
561     return new FuchsiaCXXABI(CGM);
562 
563   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
564   // include the other 32-bit ARM oddities: constructor/destructor return values
565   // and array cookies.
566   case TargetCXXABI::GenericAArch64:
567     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
568                              /*UseARMGuardVarABI=*/true);
569 
570   case TargetCXXABI::GenericMIPS:
571     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
572 
573   case TargetCXXABI::WebAssembly:
574     return new WebAssemblyCXXABI(CGM);
575 
576   case TargetCXXABI::XL:
577     return new XLCXXABI(CGM);
578 
579   case TargetCXXABI::GenericItanium:
580     if (CGM.getContext().getTargetInfo().getTriple().getArch()
581         == llvm::Triple::le32) {
582       // For PNaCl, use ARM-style method pointers so that PNaCl code
583       // does not assume anything about the alignment of function
584       // pointers.
585       return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
586     }
587     return new ItaniumCXXABI(CGM);
588 
589   case TargetCXXABI::Microsoft:
590     llvm_unreachable("Microsoft ABI is not Itanium-based");
591   }
592   llvm_unreachable("bad ABI kind");
593 }
594 
595 llvm::Type *
596 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
597   if (MPT->isMemberDataPointer())
598     return CGM.PtrDiffTy;
599   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
600 }
601 
602 /// In the Itanium and ARM ABIs, method pointers have the form:
603 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
604 ///
605 /// In the Itanium ABI:
606 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
607 ///  - the this-adjustment is (memptr.adj)
608 ///  - the virtual offset is (memptr.ptr - 1)
609 ///
610 /// In the ARM ABI:
611 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
612 ///  - the this-adjustment is (memptr.adj >> 1)
613 ///  - the virtual offset is (memptr.ptr)
614 /// ARM uses 'adj' for the virtual flag because Thumb functions
615 /// may be only single-byte aligned.
616 ///
617 /// If the member is virtual, the adjusted 'this' pointer points
618 /// to a vtable pointer from which the virtual offset is applied.
619 ///
620 /// If the member is non-virtual, memptr.ptr is the address of
621 /// the function to call.
622 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
623     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
624     llvm::Value *&ThisPtrForCall,
625     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
626   CGBuilderTy &Builder = CGF.Builder;
627 
628   const FunctionProtoType *FPT =
629       MPT->getPointeeType()->castAs<FunctionProtoType>();
630   auto *RD =
631       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
632 
633   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
634 
635   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
636   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
637   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
638 
639   // Extract memptr.adj, which is in the second field.
640   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
641 
642   // Compute the true adjustment.
643   llvm::Value *Adj = RawAdj;
644   if (UseARMMethodPtrABI)
645     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
646 
647   // Apply the adjustment and cast back to the original struct type
648   // for consistency.
649   llvm::Value *This = ThisAddr.getPointer();
650   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
651   Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
652   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
653   ThisPtrForCall = This;
654 
655   // Load the function pointer.
656   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
657 
658   // If the LSB in the function pointer is 1, the function pointer points to
659   // a virtual function.
660   llvm::Value *IsVirtual;
661   if (UseARMMethodPtrABI)
662     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
663   else
664     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
665   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
666   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
667 
668   // In the virtual path, the adjustment left 'This' pointing to the
669   // vtable of the correct base subobject.  The "function pointer" is an
670   // offset within the vtable (+1 for the virtual flag on non-ARM).
671   CGF.EmitBlock(FnVirtual);
672 
673   // Cast the adjusted this to a pointer to vtable pointer and load.
674   llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
675   CharUnits VTablePtrAlign =
676     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
677                                       CGF.getPointerAlign());
678   llvm::Value *VTable = CGF.GetVTablePtr(
679       Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
680 
681   // Apply the offset.
682   // On ARM64, to reserve extra space in virtual member function pointers,
683   // we only pay attention to the low 32 bits of the offset.
684   llvm::Value *VTableOffset = FnAsInt;
685   if (!UseARMMethodPtrABI)
686     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
687   if (Use32BitVTableOffsetABI) {
688     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
689     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
690   }
691 
692   // Check the address of the function pointer if CFI on member function
693   // pointers is enabled.
694   llvm::Constant *CheckSourceLocation;
695   llvm::Constant *CheckTypeDesc;
696   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
697                             CGM.HasHiddenLTOVisibility(RD);
698   bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
699                            CGM.HasHiddenLTOVisibility(RD);
700   bool ShouldEmitWPDInfo =
701       CGM.getCodeGenOpts().WholeProgramVTables &&
702       // Don't insert type tests if we are forcing public visibility.
703       !CGM.AlwaysHasLTOVisibilityPublic(RD);
704   llvm::Value *VirtualFn = nullptr;
705 
706   {
707     CodeGenFunction::SanitizerScope SanScope(&CGF);
708     llvm::Value *TypeId = nullptr;
709     llvm::Value *CheckResult = nullptr;
710 
711     if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
712       // If doing CFI, VFE or WPD, we will need the metadata node to check
713       // against.
714       llvm::Metadata *MD =
715           CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
716       TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
717     }
718 
719     if (ShouldEmitVFEInfo) {
720       llvm::Value *VFPAddr =
721           Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
722 
723       // If doing VFE, load from the vtable with a type.checked.load intrinsic
724       // call. Note that we use the GEP to calculate the address to load from
725       // and pass 0 as the offset to the intrinsic. This is because every
726       // vtable slot of the correct type is marked with matching metadata, and
727       // we know that the load must be from one of these slots.
728       llvm::Value *CheckedLoad = Builder.CreateCall(
729           CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
730           {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
731       CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
732       VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
733     } else {
734       // When not doing VFE, emit a normal load, as it allows more
735       // optimisations than type.checked.load.
736       if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
737         llvm::Value *VFPAddr =
738             Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
739         llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
740                                       ? llvm::Intrinsic::type_test
741                                       : llvm::Intrinsic::public_type_test;
742 
743         CheckResult = Builder.CreateCall(
744             CGM.getIntrinsic(IID),
745             {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
746       }
747 
748       if (CGM.getItaniumVTableContext().isRelativeLayout()) {
749         VirtualFn = CGF.Builder.CreateCall(
750             CGM.getIntrinsic(llvm::Intrinsic::load_relative,
751                              {VTableOffset->getType()}),
752             {VTable, VTableOffset});
753       } else {
754         llvm::Value *VFPAddr =
755             CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
756         VirtualFn = CGF.Builder.CreateAlignedLoad(
757             llvm::PointerType::getUnqual(CGF.getLLVMContext()), VFPAddr,
758             CGF.getPointerAlign(), "memptr.virtualfn");
759       }
760     }
761     assert(VirtualFn && "Virtual fuction pointer not created!");
762     assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
763             CheckResult) &&
764            "Check result required but not created!");
765 
766     if (ShouldEmitCFICheck) {
767       // If doing CFI, emit the check.
768       CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
769       CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
770       llvm::Constant *StaticData[] = {
771           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
772           CheckSourceLocation,
773           CheckTypeDesc,
774       };
775 
776       if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
777         CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
778       } else {
779         llvm::Value *AllVtables = llvm::MetadataAsValue::get(
780             CGM.getLLVMContext(),
781             llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
782         llvm::Value *ValidVtable = Builder.CreateCall(
783             CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
784         CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
785                       SanitizerHandler::CFICheckFail, StaticData,
786                       {VTable, ValidVtable});
787       }
788 
789       FnVirtual = Builder.GetInsertBlock();
790     }
791   } // End of sanitizer scope
792 
793   CGF.EmitBranch(FnEnd);
794 
795   // In the non-virtual path, the function pointer is actually a
796   // function pointer.
797   CGF.EmitBlock(FnNonVirtual);
798   llvm::Value *NonVirtualFn = Builder.CreateIntToPtr(
799       FnAsInt, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
800       "memptr.nonvirtualfn");
801 
802   // Check the function pointer if CFI on member function pointers is enabled.
803   if (ShouldEmitCFICheck) {
804     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
805     if (RD->hasDefinition()) {
806       CodeGenFunction::SanitizerScope SanScope(&CGF);
807 
808       llvm::Constant *StaticData[] = {
809           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
810           CheckSourceLocation,
811           CheckTypeDesc,
812       };
813 
814       llvm::Value *Bit = Builder.getFalse();
815       llvm::Value *CastedNonVirtualFn =
816           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
817       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
818         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
819             getContext().getMemberPointerType(
820                 MPT->getPointeeType(),
821                 getContext().getRecordType(Base).getTypePtr()));
822         llvm::Value *TypeId =
823             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
824 
825         llvm::Value *TypeTest =
826             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
827                                {CastedNonVirtualFn, TypeId});
828         Bit = Builder.CreateOr(Bit, TypeTest);
829       }
830 
831       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
832                     SanitizerHandler::CFICheckFail, StaticData,
833                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
834 
835       FnNonVirtual = Builder.GetInsertBlock();
836     }
837   }
838 
839   // We're done.
840   CGF.EmitBlock(FnEnd);
841   llvm::PHINode *CalleePtr =
842       Builder.CreatePHI(llvm::PointerType::getUnqual(CGF.getLLVMContext()), 2);
843   CalleePtr->addIncoming(VirtualFn, FnVirtual);
844   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
845 
846   CGCallee Callee(FPT, CalleePtr);
847   return Callee;
848 }
849 
850 /// Compute an l-value by applying the given pointer-to-member to a
851 /// base object.
852 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
853     CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
854     const MemberPointerType *MPT) {
855   assert(MemPtr->getType() == CGM.PtrDiffTy);
856 
857   CGBuilderTy &Builder = CGF.Builder;
858 
859   // Apply the offset, which we assume is non-null.
860   return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr,
861                                    "memptr.offset");
862 }
863 
864 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
865 /// conversion.
866 ///
867 /// Bitcast conversions are always a no-op under Itanium.
868 ///
869 /// Obligatory offset/adjustment diagram:
870 ///         <-- offset -->          <-- adjustment -->
871 ///   |--------------------------|----------------------|--------------------|
872 ///   ^Derived address point     ^Base address point    ^Member address point
873 ///
874 /// So when converting a base member pointer to a derived member pointer,
875 /// we add the offset to the adjustment because the address point has
876 /// decreased;  and conversely, when converting a derived MP to a base MP
877 /// we subtract the offset from the adjustment because the address point
878 /// has increased.
879 ///
880 /// The standard forbids (at compile time) conversion to and from
881 /// virtual bases, which is why we don't have to consider them here.
882 ///
883 /// The standard forbids (at run time) casting a derived MP to a base
884 /// MP when the derived MP does not point to a member of the base.
885 /// This is why -1 is a reasonable choice for null data member
886 /// pointers.
887 llvm::Value *
888 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
889                                            const CastExpr *E,
890                                            llvm::Value *src) {
891   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
892          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
893          E->getCastKind() == CK_ReinterpretMemberPointer);
894 
895   // Under Itanium, reinterprets don't require any additional processing.
896   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
897 
898   // Use constant emission if we can.
899   if (isa<llvm::Constant>(src))
900     return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
901 
902   llvm::Constant *adj = getMemberPointerAdjustment(E);
903   if (!adj) return src;
904 
905   CGBuilderTy &Builder = CGF.Builder;
906   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
907 
908   const MemberPointerType *destTy =
909     E->getType()->castAs<MemberPointerType>();
910 
911   // For member data pointers, this is just a matter of adding the
912   // offset if the source is non-null.
913   if (destTy->isMemberDataPointer()) {
914     llvm::Value *dst;
915     if (isDerivedToBase)
916       dst = Builder.CreateNSWSub(src, adj, "adj");
917     else
918       dst = Builder.CreateNSWAdd(src, adj, "adj");
919 
920     // Null check.
921     llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
922     llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
923     return Builder.CreateSelect(isNull, src, dst);
924   }
925 
926   // The this-adjustment is left-shifted by 1 on ARM.
927   if (UseARMMethodPtrABI) {
928     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
929     offset <<= 1;
930     adj = llvm::ConstantInt::get(adj->getType(), offset);
931   }
932 
933   llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
934   llvm::Value *dstAdj;
935   if (isDerivedToBase)
936     dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
937   else
938     dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
939 
940   return Builder.CreateInsertValue(src, dstAdj, 1);
941 }
942 
943 llvm::Constant *
944 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
945                                            llvm::Constant *src) {
946   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
947          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
948          E->getCastKind() == CK_ReinterpretMemberPointer);
949 
950   // Under Itanium, reinterprets don't require any additional processing.
951   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
952 
953   // If the adjustment is trivial, we don't need to do anything.
954   llvm::Constant *adj = getMemberPointerAdjustment(E);
955   if (!adj) return src;
956 
957   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
958 
959   const MemberPointerType *destTy =
960     E->getType()->castAs<MemberPointerType>();
961 
962   // For member data pointers, this is just a matter of adding the
963   // offset if the source is non-null.
964   if (destTy->isMemberDataPointer()) {
965     // null maps to null.
966     if (src->isAllOnesValue()) return src;
967 
968     if (isDerivedToBase)
969       return llvm::ConstantExpr::getNSWSub(src, adj);
970     else
971       return llvm::ConstantExpr::getNSWAdd(src, adj);
972   }
973 
974   // The this-adjustment is left-shifted by 1 on ARM.
975   if (UseARMMethodPtrABI) {
976     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
977     offset <<= 1;
978     adj = llvm::ConstantInt::get(adj->getType(), offset);
979   }
980 
981   llvm::Constant *srcAdj = src->getAggregateElement(1);
982   llvm::Constant *dstAdj;
983   if (isDerivedToBase)
984     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
985   else
986     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
987 
988   llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
989   assert(res != nullptr && "Folding must succeed");
990   return res;
991 }
992 
993 llvm::Constant *
994 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
995   // Itanium C++ ABI 2.3:
996   //   A NULL pointer is represented as -1.
997   if (MPT->isMemberDataPointer())
998     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
999 
1000   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1001   llvm::Constant *Values[2] = { Zero, Zero };
1002   return llvm::ConstantStruct::getAnon(Values);
1003 }
1004 
1005 llvm::Constant *
1006 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1007                                      CharUnits offset) {
1008   // Itanium C++ ABI 2.3:
1009   //   A pointer to data member is an offset from the base address of
1010   //   the class object containing it, represented as a ptrdiff_t
1011   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1012 }
1013 
1014 llvm::Constant *
1015 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1016   return BuildMemberPointer(MD, CharUnits::Zero());
1017 }
1018 
/// Build the constant {ptr, adj} pair representing a pointer to the
/// member function MD with the given this-adjustment, following the
/// Itanium (or ARM-variant) member function pointer encoding.
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      // Classic layout: each vtable slot is pointer-sized.
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(LangAS::Default));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    // Non-virtual: ptr is the function address itself; adj carries the
    // this-adjustment (doubled under the ARM ABI so bit 0 stays clear,
    // marking the pointer as non-virtual).
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
1081 
1082 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1083                                                  QualType MPType) {
1084   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1085   const ValueDecl *MPD = MP.getMemberPointerDecl();
1086   if (!MPD)
1087     return EmitNullMemberPointer(MPT);
1088 
1089   CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1090 
1091   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1092     return BuildMemberPointer(MD, ThisAdjustment);
1093 
1094   CharUnits FieldOffset =
1095     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1096   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1097 }
1098 
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Select the predicate and logical connectives for the requested
  // comparison.  For inequality we apply De Morgan's laws: the
  // predicate is negated and the roles of And/Or are swapped relative
  // to the equality form (hence the intentionally "crossed" init).
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1176 
1177 llvm::Value *
1178 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1179                                           llvm::Value *MemPtr,
1180                                           const MemberPointerType *MPT) {
1181   CGBuilderTy &Builder = CGF.Builder;
1182 
1183   /// For member data pointers, this is just a check against -1.
1184   if (MPT->isMemberDataPointer()) {
1185     assert(MemPtr->getType() == CGM.PtrDiffTy);
1186     llvm::Value *NegativeOne =
1187       llvm::Constant::getAllOnesValue(MemPtr->getType());
1188     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1189   }
1190 
1191   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1192   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1193 
1194   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1195   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1196 
1197   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1198   // (the virtual bit) is set.
1199   if (UseARMMethodPtrABI) {
1200     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1201     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1202     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1203     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1204                                                   "memptr.isvirtual");
1205     Result = Builder.CreateOr(Result, IsVirtual);
1206   }
1207 
1208   return Result;
1209 }
1210 
1211 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1212   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1213   if (!RD)
1214     return false;
1215 
1216   // If C++ prohibits us from making a copy, return by address.
1217   if (!RD->canPassInRegisters()) {
1218     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1219     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1220     return true;
1221   }
1222   return false;
1223 }
1224 
1225 /// The Itanium ABI requires non-zero initialization only for data
1226 /// member pointers, for which '0' is a valid offset.
1227 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1228   return MPT->isMemberFunctionPointer();
1229 }
1230 
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable = CGF.GetVTablePtr(
        Ptr, llvm::PointerType::getUnqual(CGF.getLLVMContext()), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
                                                        CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // With a global delete, only the complete-object destructor runs and
  // the cleanup pushed above deallocates; otherwise the deleting
  // destructor both destroys and frees the object.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1275 
1276 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1277   // void __cxa_rethrow();
1278 
1279   llvm::FunctionType *FTy =
1280     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1281 
1282   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1283 
1284   if (isNoReturn)
1285     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
1286   else
1287     CGF.EmitRuntimeCallOrInvoke(Fn);
1288 }
1289 
1290 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1291   // void *__cxa_allocate_exception(size_t thrown_size);
1292 
1293   llvm::FunctionType *FTy =
1294     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1295 
1296   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1297 }
1298 
1299 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1300   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1301   //                  void (*dest) (void *));
1302 
1303   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
1304   llvm::FunctionType *FTy =
1305     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1306 
1307   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1308 }
1309 
/// Emit a 'throw' expression: allocate the exception object, construct
/// the thrown value into it, then call __cxa_throw.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  // __cxa_allocate_exception is emitted as a nounwind call.
  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Evaluate/construct the thrown value directly into the buffer.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(
      E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      // Pass the complete-object destructor so the runtime can destroy
      // the exception object when it is done with it.
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  // __cxa_throw(exception, tinfo, dtor) does not return.
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
1344 
1345 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1346   // void *__dynamic_cast(const void *sub,
1347   //                      const abi::__class_type_info *src,
1348   //                      const abi::__class_type_info *dst,
1349   //                      std::ptrdiff_t src2dst_offset);
1350 
1351   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1352   llvm::Type *PtrDiffTy =
1353     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1354 
1355   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1356 
1357   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1358 
1359   // Mark the function as nounwind readonly.
1360   llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1361   FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1362   FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
1363   llvm::AttributeList Attrs = llvm::AttributeList::get(
1364       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1365 
1366   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1367 }
1368 
1369 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1370   // void __cxa_bad_cast();
1371   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1372   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1373 }
1374 
/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
///
/// The returned hint encodes:
///   >= 0: the static offset of Src within Dst (unique public path)
///   -1:   no hint (a path runs through a virtual base)
///   -2:   Src is not a public base of Dst
///   -3:   Src appears as multiple public nonvirtual bases of Dst
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
1426 
1427 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1428   // void __cxa_bad_typeid();
1429   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1430 
1431   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1432 }
1433 
1434 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1435                                               QualType SrcRecordTy) {
1436   return IsDeref;
1437 }
1438 
1439 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1440   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1441   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1442   Call->setDoesNotReturn();
1443   CGF.Builder.CreateUnreachable();
1444 }
1445 
/// Load the std::type_info pointer for an object through its vtable.
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value = CGF.GetVTablePtr(
      ThisPtr, llvm::PointerType::getUnqual(CGF.getLLVMContext()), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    // Relative layout: resolve the 32-bit self-relative entry 4 bytes
    // before the address point; the final load below then dereferences
    // the resulting slot to obtain the type_info pointer.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
  } else {
    // Load the type info.
    // Classic layout: the slot at entry -1 (relative to the address
    // point) holds the type_info pointer.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
1469 
/// A dynamic_cast runtime call needs a null check only when the operand is a
/// pointer (a null pointer simply yields null); SrcRecordTy is not consulted.
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}
1474 
/// Emit a call to the Itanium runtime entry point
///   void *__dynamic_cast(const void *sub, src_type_info, dst_type_info,
///                        ptrdiff_t src2dst_offset);
/// For casts to reference type, a null result branches to a bad_cast block
/// instead of being returned to the caller.
llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());

  // RTTI descriptors for the static source and destination class types.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint. A value >= 0 is the offset of a unique public
  // non-virtual Src base within Dst; the negative values (-1/-2/-3) encode
  // the no-hint, not-a-public-base, and ambiguous cases for the runtime
  // (see computeOffsetHint).
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint};
  llvm::Value *Value =
      CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), Args);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    // The bad_cast block throws via __cxa_bad_cast and never returns.
    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1513 
/// Emit a dynamic_cast that can be implemented "exactly": when the vtable the
/// destination type's objects use is known precisely (presumably guaranteed
/// by the caller — confirm at call sites), the cast reduces to a single vptr
/// comparison plus a constant pointer adjustment, with no runtime call.
llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
    llvm::BasicBlock *CastFail) {
  ASTContext &Context = getContext();

  // Find all the inheritance paths.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);
  (void)DestDecl->isDerivedFrom(SrcDecl, Paths);

  // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
  // might appear.
  std::optional<CharUnits> Offset;
  for (const CXXBasePath &Path : Paths) {
    // dynamic_cast only finds public inheritance paths.
    if (Path.Access != AS_public)
      continue;

    // Accumulate the byte offset of the Src subobject along this path.
    CharUnits PathOffset;
    for (const CXXBasePathElement &PathElement : Path) {
      // Find the offset along this inheritance step.
      const CXXRecordDecl *Base =
          PathElement.Base->getType()->getAsCXXRecordDecl();
      if (PathElement.Base->isVirtual()) {
        // For a virtual base class, we know that the derived class is exactly
        // DestDecl, so we can use the vbase offset from its layout.
        const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
        PathOffset = L.getVBaseClassOffset(Base);
      } else {
        const ASTRecordLayout &L =
            Context.getASTRecordLayout(PathElement.Class);
        PathOffset += L.getBaseClassOffset(Base);
      }
    }

    if (!Offset)
      Offset = PathOffset;
    else if (Offset != PathOffset) {
      // Base appears in at least two different places. Find the most-derived
      // object and see if it's a DestDecl. Note that the most-derived object
      // must be at least as aligned as this base class subobject, and must
      // have a vptr at offset 0.
      ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
                         CGF.VoidPtrTy, ThisAddr.getAlignment());
      SrcDecl = DestDecl;
      Offset = CharUnits::Zero();
      break;
    }
  }

  if (!Offset) {
    // If there are no public inheritance paths, the cast always fails.
    CGF.EmitBranch(CastFail);
    return llvm::PoisonValue::get(CGF.VoidPtrTy);
  }

  // Compare the vptr against the expected vptr for the destination type at
  // this offset. Note that we do not know what type ThisAddr points to in
  // the case where the derived class multiply inherits from the base class
  // so we can't use GetVTablePtr, so we load the vptr directly instead.
  llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
      ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
  CGM.DecorateInstructionWithTBAA(
      VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
  llvm::Value *Success = CGF.Builder.CreateICmpEQ(
      VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
  llvm::Value *Result = ThisAddr.getPointer();
  // On success ThisAddr points at a SrcDecl subobject located Offset bytes
  // into the DestDecl object, so step backwards to the object's start.
  if (!Offset->isZero())
    Result = CGF.Builder.CreateInBoundsGEP(
        CGF.CharTy, Result,
        {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
  CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
  return Result;
}
1591 
/// Emit dynamic_cast<void*>: adjust the operand to point at its most-derived
/// object by adding the offset-to-top entry stored in the vtable.
llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable = CGF.GetVTablePtr(
        ThisAddr, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
        ClassDecl);

    // Get the offset-to-top from the vtable.
    // Relative layout: entries are 32-bit, so the offset-to-top lives two
    // i32 slots before the address point.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable = CGF.GetVTablePtr(
        ThisAddr, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
        ClassDecl);

    // Get the offset-to-top from the vtable.
    // Classic layout: the offset-to-top is a ptrdiff_t two pointer-sized
    // slots before the address point.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the (possibly negative) offset to the pointer to reach the
  // start of the most-derived object.
  return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(),
                                       OffsetToTop);
}
1628 
1629 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1630   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1631   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1632   Call->setDoesNotReturn();
1633   CGF.Builder.CreateUnreachable();
1634   return true;
1635 }
1636 
/// Load the dynamic offset of the virtual base BaseClassDecl from the vtable
/// of an object whose static type is ClassDecl. The slot's position relative
/// to the address point is statically known; its contents are not.
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  // Byte displacement of this vbase's offset slot from the address point.
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(
        CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
        "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores the vbase offset as a 32-bit value.
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    // Classic layout stores it as a ptrdiff_t.
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
1662 
1663 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1664   // Just make sure we're in sync with TargetCXXABI.
1665   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1666 
1667   // The constructor used for constructing this as a base class;
1668   // ignores virtual bases.
1669   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1670 
1671   // The constructor used for constructing this as a complete class;
1672   // constructs the virtual bases, then calls the base constructor.
1673   if (!D->getParent()->isAbstract()) {
1674     // We don't need to emit the complete ctor if the class is abstract.
1675     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1676   }
1677 }
1678 
1679 CGCXXABI::AddedStructorArgCounts
1680 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1681                                       SmallVectorImpl<CanQualType> &ArgTys) {
1682   ASTContext &Context = getContext();
1683 
1684   // All parameters are already in place except VTT, which goes after 'this'.
1685   // These are Clang types, so we don't need to worry about sret yet.
1686 
1687   // Check if we need to add a VTT parameter (which has type global void **).
1688   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1689                                              : GD.getDtorType() == Dtor_Base) &&
1690       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1691     LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1692     QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1693     ArgTys.insert(ArgTys.begin() + 1,
1694                   Context.getPointerType(CanQualType::CreateUnsafe(Q)));
1695     return AddedStructorArgCounts::prefix(1);
1696   }
1697   return AddedStructorArgCounts{};
1698 }
1699 
1700 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1701   // The destructor used for destructing this as a base class; ignores
1702   // virtual bases.
1703   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1704 
1705   // The destructor used for destructing this as a most-derived class;
1706   // call the base destructor and then destructs any virtual bases.
1707   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1708 
1709   // The destructor in a virtual table is always a 'deleting'
1710   // destructor, which calls the complete destructor and then uses the
1711   // appropriate operator delete.
1712   if (D->isVirtual())
1713     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1714 }
1715 
/// Add the implicit ABI parameters (just the VTT here) to the parameter list
/// being built for the current constructor/destructor variant.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT has type "global void **"; honor the global address space.
    LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
    QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
    QualType T = Context.getPointerType(Q);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    // The VTT always goes directly after 'this'.
    Params.insert(Params.begin() + 1, VTTDecl);
    // Remember the decl so the prologue can load the parameter value.
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1737 
/// Emit the standard instance-function prologue: initialize 'this', the VTT
/// slot (for structors that take one), and the 'this'-return slot.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
1764 
1765 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1766     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1767     bool ForVirtualBase, bool Delegating) {
1768   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1769     return AddedStructorArgs{};
1770 
1771   // Insert the implicit 'vtt' argument as the second argument. Make sure to
1772   // correctly reflect its address space, which can differ from generic on
1773   // some targets.
1774   llvm::Value *VTT =
1775       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1776   LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1777   QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
1778   QualType VTTTy = getContext().getPointerType(Q);
1779   return AddedStructorArgs::prefix({{VTT, VTTTy}});
1780 }
1781 
1782 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1783     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1784     bool ForVirtualBase, bool Delegating) {
1785   GlobalDecl GD(DD, Type);
1786   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1787 }
1788 
/// Emit a (non-virtual) call to the given destructor variant, passing the
/// VTT when the variant requires one.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  // Null when this variant takes no VTT.
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  // Under -fapple-kext, non-base calls to a virtual destructor must go
  // through the kext virtual dispatch scheme.
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
                            nullptr);
}
1809 
/// Emit the definition of RD's vtable group: initializer, linkage, COMDAT,
/// visibility, and type metadata. A vtable that already has an initializer
/// was emitted earlier and is left untouched.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  // Weak definitions need a COMDAT so the linker keeps a single copy.
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // Relative vtables carry no pointers, so hwasan tagging is dropped; a
  // non-dso_local vtable additionally gets a local alias for PIC-friendly
  // relative references.
  if (VTContext.isRelativeLayout()) {
    CGVT.RemoveHwasanMetadata(VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
  }
}
1872 
1873 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1874     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1875   if (Vptr.NearestVBase == nullptr)
1876     return false;
1877   return NeedsVTTParameter(CGF.CurGD);
1878 }
1879 
1880 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1881     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1882     const CXXRecordDecl *NearestVBase) {
1883 
1884   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1885       NeedsVTTParameter(CGF.CurGD)) {
1886     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1887                                                   NearestVBase);
1888   }
1889   return getVTableAddressPoint(Base, VTableClass);
1890 }
1891 
/// Compute the constant address point for Base within VTableClass's vtable
/// group as a GEP into the vtable global.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // GEP indices: {0, vtable-within-group, slot-within-that-vtable}.
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // Marking index 1 'inrange' tells LLVM that accesses derived from the
  // result stay within the selected member vtable.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}
1913 
1914 // Check whether all the non-inline virtual methods for the class have the
1915 // specified attribute.
1916 template <typename T>
1917 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1918   bool FoundNonInlineVirtualMethodWithAttr = false;
1919   for (const auto *D : RD->noload_decls()) {
1920     if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1921       if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1922           FD->doesThisDeclarationHaveABody())
1923         continue;
1924       if (!D->hasAttr<T>())
1925         return false;
1926       FoundNonInlineVirtualMethodWithAttr = true;
1927     }
1928   }
1929 
1930   // We didn't find any non-inline virtual methods missing the attribute.  We
1931   // will return true when we found at least one non-inline virtual with the
1932   // attribute.  (This lets our caller know that the attribute needs to be
1933   // propagated up to the vtable.)
1934   return FoundNonInlineVirtualMethodWithAttr;
1935 }
1936 
/// Load a vtable address point out of the VTT. Used inside constructors and
/// destructors of classes involved with virtual inheritance, where the vptr
/// values for bases differ from those of the complete object.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  // Step to the entry for this base (index 0 needs no adjustment).
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
                                                 VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
                                       CGF.getPointerAlign());
}
1957 
/// In this ABI the address point used in constant expressions is the same
/// constant GEP used for ordinary vptr initialization.
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
}
1962 
/// Get — creating and caching on first use — the global variable for RD's
/// vtable group. The returned global may still lack an initializer; actual
/// emission is deferred via addDeferredVTable.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer to global alignment for the vtable. Otherwise we would align
  // them based on the size of the initializer which doesn't make sense as only
  // single values are read.
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(AS);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getAsAlign());
  // Only the contents of the vtable matter, never its address identity.
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // In MS C++ if you have a class with virtual functions in which you are using
  // selective member import/export, then all virtual functions must be exported
  // unless they are inline, otherwise a link error will result. To match this
  // behavior, for such classes, we dllimport the vtable if it is defined
  // externally and all the non-inline virtual methods are marked dllimport, and
  // we dllexport the vtable if it is defined in this TU and all the non-inline
  // virtual methods are marked dllexport.
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
      if (CGM.getVTables().isVTableExternal(RD)) {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      } else {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      }
    }
  }
  CGM.setGVProperties(VTable, RD);

  return VTable;
}
2017 
/// Load the callee for a virtual call: read the object's vptr, index the
/// method's slot, and load the function pointer (via llvm.load.relative for
/// the relative vtable layout).
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // Checked load: validates the vtable against type metadata before use
    // (byte offset = slot index * pointer width).
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, PtrTy,
        VTableIndex *
            CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
            8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: each slot is a 32-bit self-relative offset resolved
      // by llvm.load.relative.
      VFuncLoad = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
    } else {
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          PtrTy, VTable, VTableIndex, "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
                                                CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      // The relative-layout path produces an intrinsic call, not a load, so
      // only annotate when the slot access really is a load instruction.
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
2071 
2072 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2073     CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2074     Address This, DeleteOrMemberCallExpr E) {
2075   auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2076   auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2077   assert((CE != nullptr) ^ (D != nullptr));
2078   assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2079   assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2080 
2081   GlobalDecl GD(Dtor, DtorType);
2082   const CGFunctionInfo *FInfo =
2083       &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2084   llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2085   CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2086 
2087   QualType ThisTy;
2088   if (CE) {
2089     ThisTy = CE->getObjectType();
2090   } else {
2091     ThisTy = D->getDestroyedType();
2092   }
2093 
2094   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2095                             QualType(), nullptr);
2096   return nullptr;
2097 }
2098 
2099 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2100   CodeGenVTables &VTables = CGM.getVTables();
2101   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2102   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2103 }
2104 
2105 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2106     const CXXRecordDecl *RD) const {
2107   // We don't emit available_externally vtables if we are in -fapple-kext mode
2108   // because kext mode does not permit devirtualization.
2109   if (CGM.getLangOpts().AppleKext)
2110     return false;
2111 
2112   // If the vtable is hidden then it is not safe to emit an available_externally
2113   // copy of vtable.
2114   if (isVTableHidden(RD))
2115     return false;
2116 
2117   if (CGM.getCodeGenOpts().ForceEmitVTables)
2118     return true;
2119 
2120   // If we don't have any not emitted inline virtual function then we are safe
2121   // to emit an available_externally copy of vtable.
2122   // FIXME we can still emit a copy of the vtable if we
2123   // can emit definition of the inline functions.
2124   if (hasAnyUnusedVirtualInlineFunction(RD))
2125     return false;
2126 
2127   // For a class with virtual bases, we must also be able to speculatively
2128   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2129   // the vtable" and "can emit the VTT". For a base subobject, this means we
2130   // need to be able to emit non-virtual base vtables.
2131   if (RD->getNumVBases()) {
2132     for (const auto &B : RD->bases()) {
2133       auto *BRD = B.getType()->getAsCXXRecordDecl();
2134       assert(BRD && "no class for base specifier");
2135       if (B.isVirtual() || !BRD->isDynamicClass())
2136         continue;
2137       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2138         return false;
2139     }
2140   }
2141 
2142   return true;
2143 }
2144 
2145 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2146   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2147     return false;
2148 
2149   // For a complete-object vtable (or more specifically, for the VTT), we need
2150   // to be able to speculatively emit the vtables of all dynamic virtual bases.
2151   for (const auto &B : RD->vbases()) {
2152     auto *BRD = B.getType()->getAsCXXRecordDecl();
2153     assert(BRD && "no class for base specifier");
2154     if (!BRD->isDynamicClass())
2155       continue;
2156     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2157       return false;
2158   }
2159 
2160   return true;
2161 }
/// Apply the non-virtual and/or virtual pointer adjustment described by a
/// thunk.
///
/// \param InitialPtr            the unadjusted pointer.
/// \param NonVirtualAdjustment  constant byte offset to add (may be 0).
/// \param VirtualAdjustment     byte offset into the vtable at which the
///                              dynamic adjustment value is stored, or 0 if
///                              there is no virtual adjustment.
/// \param IsReturnAdjustment    ordering flag: for a 'this' adjustment the
///                              non-virtual offset is applied before the
///                              virtual one; for a return adjustment, after.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // Fast path: no adjustment at all.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  // Work on an i8 element type so the GEPs below are byte offsets.
  Address V = InitialPtr.withElementType(CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    // Load the vtable pointer from the object.
    Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    // Compute the address of the adjustment slot inside the vtable.
    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                        CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(
        V.getElementType(), V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
2217 
2218 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2219                                                   Address This,
2220                                                   const ThisAdjustment &TA) {
2221   return performTypeAdjustment(CGF, This, TA.NonVirtual,
2222                                TA.Virtual.Itanium.VCallOffsetOffset,
2223                                /*IsReturnAdjustment=*/false);
2224 }
2225 
2226 llvm::Value *
2227 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2228                                        const ReturnAdjustment &RA) {
2229   return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2230                                RA.Virtual.Itanium.VBaseOffsetOffset,
2231                                /*IsReturnAdjustment=*/true);
2232 }
2233 
2234 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2235                                     RValue RV, QualType ResultType) {
2236   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2237     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2238 
2239   // Destructor thunks in the ARM ABI have indeterminate results.
2240   llvm::Type *T = CGF.ReturnValue.getElementType();
2241   RValue Undef = RValue::get(llvm::UndefValue::get(T));
2242   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2243 }
2244 
2245 /************************** Array allocation cookies **************************/
2246 
2247 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2248   // The array cookie is a size_t; pad that up to the element alignment.
2249   // The cookie is actually right-justified in that space.
2250   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2251                   CGM.getContext().getPreferredTypeAlignInChars(elementType));
2252 }
2253 
/// Write the Itanium array cookie — the element count, right-justified in a
/// slot padded up to the element alignment — at the start of the allocation
/// and return the adjusted pointer to the array data.
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  // The ASan special-casing below only applies to address space 0.
  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  // Keep this computation in sync with getArrayCookieSizeImpl.
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie: the count is right-justified, so skip
  // over any padding at the front of the cookie slot.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    SI->setNoSanitizeMetadata();
    // void __asan_poison_cxx_array_cookie(uintptr_t *p) — poisons the cookie
    // shadow so direct user reads of the cookie are reported.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
2298 
/// Read the element count back out of an Itanium array cookie placed by
/// InitializeArrayCookie.  \p allocPtr points at the start of the original
/// allocation; \p cookieSize is the total cookie size for this element type.
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);

  // ASan poisons the cookie shadow (see InitializeArrayCookie), but only in
  // address space 0; elsewhere a plain load is fine.
  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGF.SizeTy, llvm::PointerType::getUnqual(CGF.getLLVMContext()), false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
2324 
2325 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2326   // ARM says that the cookie is always:
2327   //   struct array_cookie {
2328   //     std::size_t element_size; // element_size != 0
2329   //     std::size_t element_count;
2330   //   };
2331   // But the base ABI doesn't give anything an alignment greater than
2332   // 8, so we can dismiss this as typical ABI-author blindness to
2333   // actual language complexity and round up to the element alignment.
2334   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2335                   CGM.getContext().getTypeAlignInChars(elementType));
2336 }
2337 
2338 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2339                                          Address newPtr,
2340                                          llvm::Value *numElements,
2341                                          const CXXNewExpr *expr,
2342                                          QualType elementType) {
2343   assert(requiresArrayCookie(expr));
2344 
2345   // The cookie is always at the start of the buffer.
2346   Address cookie = newPtr;
2347 
2348   // The first element is the element size.
2349   cookie = cookie.withElementType(CGF.SizeTy);
2350   llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2351                  getContext().getTypeSizeInChars(elementType).getQuantity());
2352   CGF.Builder.CreateStore(elementSize, cookie);
2353 
2354   // The second element is the element count.
2355   cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2356   CGF.Builder.CreateStore(numElements, cookie);
2357 
2358   // Finally, compute a pointer to the actual data buffer by skipping
2359   // over the cookie completely.
2360   CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2361   return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2362 }
2363 
2364 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2365                                             Address allocPtr,
2366                                             CharUnits cookieSize) {
2367   // The number of elements is at offset sizeof(size_t) relative to
2368   // the allocated pointer.
2369   Address numElementsPtr
2370     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2371 
2372   numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2373   return CGF.Builder.CreateLoad(numElementsPtr);
2374 }
2375 
2376 /*********************** Static local initialization **************************/
2377 
2378 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2379                                               llvm::PointerType *GuardPtrTy) {
2380   // int __cxa_guard_acquire(__guard *guard_object);
2381   llvm::FunctionType *FTy =
2382     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2383                             GuardPtrTy, /*isVarArg=*/false);
2384   return CGM.CreateRuntimeFunction(
2385       FTy, "__cxa_guard_acquire",
2386       llvm::AttributeList::get(CGM.getLLVMContext(),
2387                                llvm::AttributeList::FunctionIndex,
2388                                llvm::Attribute::NoUnwind));
2389 }
2390 
2391 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2392                                               llvm::PointerType *GuardPtrTy) {
2393   // void __cxa_guard_release(__guard *guard_object);
2394   llvm::FunctionType *FTy =
2395     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2396   return CGM.CreateRuntimeFunction(
2397       FTy, "__cxa_guard_release",
2398       llvm::AttributeList::get(CGM.getLLVMContext(),
2399                                llvm::AttributeList::FunctionIndex,
2400                                llvm::Attribute::NoUnwind));
2401 }
2402 
2403 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2404                                             llvm::PointerType *GuardPtrTy) {
2405   // void __cxa_guard_abort(__guard *guard_object);
2406   llvm::FunctionType *FTy =
2407     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2408   return CGM.CreateRuntimeFunction(
2409       FTy, "__cxa_guard_abort",
2410       llvm::AttributeList::get(CGM.getLLVMContext(),
2411                                llvm::AttributeList::FunctionIndex,
2412                                llvm::Attribute::NoUnwind));
2413 }
2414 
2415 namespace {
2416   struct CallGuardAbort final : EHScopeStack::Cleanup {
2417     llvm::GlobalVariable *Guard;
2418     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2419 
2420     void Emit(CodeGenFunction &CGF, Flags flags) override {
2421       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2422                                   Guard);
2423     }
2424   };
2425 }
2426 
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
///
/// Emits the guarded one-time-initialization sequence for the static or
/// thread-local variable \p D whose storage is \p var.  \p shouldPerformInit
/// is forwarded to EmitCXXGlobalVarDeclInit — presumably it suppresses the
/// initializer itself while keeping the destructor registration; confirm
/// against that function's definition.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment =
          CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
    }
  }
  // Opaque pointer type used for the __cxa_guard_* prototypes, in the
  // default globals address space.
  llvm::PointerType *guardPtrTy = llvm::PointerType::get(
      CGF.CGM.getLLVMContext(),
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage, visibility and dll storage class from the guarded
    // variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    guard->setDLLStorageClass(var->getDLLStorageClass());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      // No COMDAT to share with the variable: give the guard its own group
      // so duplicate weak definitions merge.
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    // Cache the guard so a re-emission of this function body reuses it.
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  //
  // If threadsafe statics are enabled, but we don't have inline atomics, just
  // call __cxa_guard_acquire unconditionally.  The "inline" check isn't
  // actually inline, and the user might not expect calls to __atomic libcalls.

  unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
  if (!threadsafe || MaxInlineWidthInBits) {
    // Load the first byte of the guard variable.
    llvm::LoadInst *LI =
        Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(llvm::AtomicOrdering::Acquire);

    // For ARM, we should only check the first bit, rather than the entire byte:
    //
    // ARM C++ ABI 3.2.3.1:
    //   To support the potential use of initialization guard variables
    //   as semaphores that are the target of ARM SWP and LDREX/STREX
    //   synchronizing instructions we define a static initialization
    //   guard variable to be a 4-byte aligned, 4-byte word with the
    //   following inline access protocol.
    //     #define INITIALIZED 1
    //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
    //       if (__cxa_guard_acquire(&obj_guard))
    //         ...
    //     }
    //
    // and similarly for ARM64:
    //
    // ARM64 C++ ABI 3.2.2:
    //   This ABI instead only specifies the value bit 0 of the static guard
    //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
    //   variable is not initialized and 1 when it is.
    llvm::Value *V =
        (UseARMGuardVarABI && !useInt8GuardVariable)
            ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
            : LI;
    llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

    llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");

    // Check if the first byte of the guard variable is zero.
    CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                                 CodeGenFunction::GuardKind::VariableGuard, &D);

    CGF.EmitBlock(InitCheckBlock);
  }

  // The semantics of dynamic initialization of variables with static or thread
  // storage duration depends on whether they are declared at block-scope. The
  // initialization of such variables at block-scope can be aborted with an
  // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
  // to their initialization has undefined behavior (also per C++20
  // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
  // lead to termination (per C++20 [except.terminate]p1), and recursive
  // references to the variables are governed only by the lifetime rules (per
  // C++20 [class.cdtor]p2), which means such references are perfectly fine as
  // long as they avoid touching memory. As a result, block-scope variables must
  // not be marked as initialized until after initialization completes (unless
  // the mark is reverted following an exception), but non-block-scope variables
  // must be marked prior to initialization so that recursive accesses during
  // initialization do not restart initialization.

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    // A nonzero result from __cxa_guard_acquire means we won the race and
    // must run the initializer.
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  } else if (!D.isLocalVarDecl()) {
    // For non-local variables, store 1 into the first byte of the guard
    // variable before the object initialization begins so that references
    // to the variable during initialization don't restart initialization.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        guardAddr.withElementType(CGM.Int8Ty));
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else if (D.isLocalVarDecl()) {
    // For local variables, store 1 into the first byte of the guard variable
    // after the object initialization completes so that initialization is
    // retried if initialization is interrupted by an exception.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        guardAddr.withElementType(CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}
2645 
/// Register a global destructor using __cxa_atexit.
///
/// \param dtor  the destructor function to register; assumed callable with
///              the default calling convention.
/// \param addr  the object address passed back to \p dtor at exit time; may
///              be null for destructors registered from constructor
///              functions (see below).
/// \param TLS   selects the thread-local variant (_tlv_atexit on Darwin,
///              __cxa_thread_atexit elsewhere) instead of __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.
  llvm::Type *dtorTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
                          : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {dtor.getCallee(), addr, handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2695 
2696 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2697                                                    StringRef FnName) {
2698   // Create a function that registers/unregisters destructors that have the same
2699   // priority.
2700   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2701   llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2702       FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2703 
2704   return GlobalInitOrCleanupFn;
2705 }
2706 
/// For each priority level in DtorsUsingAtExit, emit a __GLOBAL_cleanup_N
/// function that unatexit()s every registered destructor and, when
/// unatexit reports the destructor was still registered (returned 0),
/// calls it directly.  The cleanup functions are then registered as global
/// dtors at their priority.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    // Mark the whole body as compiler-generated for debug info.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      // The fall-through block doubles as the check block for the next
      // destructor, hence the name depends on whether one follows.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2762 
/// For each priority level in DtorsUsingAtExit, emit a __GLOBAL_init_N
/// constructor function that registers the level's destructors via
/// __cxa_atexit (or plain atexit), then register it as a global ctor at
/// that priority.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    // Mark the whole body as compiler-generated for debug info.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.
        CGF.registerGlobalDtorWithAtExit(Dtor);
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority);
  }

  // Targets using sinit/sterm also need the matching unregistration
  // functions emitted.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2802 
/// Register a global destructor as best as we know how.
///
/// Dispatches between __cxa_atexit/__cxa_thread_atexit, the Apple kext
/// destructor list, and plain atexit, based on codegen options and the
/// variable's TLS kind.  Does nothing for [[no_destroy]] variables.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                       llvm::FunctionCallee dtor,
                                       llvm::Constant *addr) {
  // [[no_destroy]] variables never run a destructor.
  if (D.isNoDestroy(CGM.getContext()))
    return;

  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
  // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
  // We can always use __cxa_thread_atexit.
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  // Fallback: register through the plain atexit mechanism.
  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}
2826 
2827 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2828                                        CodeGen::CodeGenModule &CGM) {
2829   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2830   // Darwin prefers to have references to thread local variables to go through
2831   // the thread wrapper instead of directly referencing the backing variable.
2832   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2833          CGM.getTarget().getTriple().isOSDarwin();
2834 }
2835 
2836 /// Get the appropriate linkage for the wrapper function. This is essentially
2837 /// the weak form of the variable's linkage; every translation unit which needs
2838 /// the wrapper emits a copy, and we want the linker to merge them.
2839 static llvm::GlobalValue::LinkageTypes
2840 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2841   llvm::GlobalValue::LinkageTypes VarLinkage =
2842       CGM.getLLVMLinkageVarDefinition(VD);
2843 
2844   // For internal linkage variables, we don't need an external or weak wrapper.
2845   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2846     return VarLinkage;
2847 
2848   // If the thread wrapper is replaceable, give it appropriate linkage.
2849   if (isThreadWrapperReplaceable(VD, CGM))
2850     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2851         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2852       return VarLinkage;
2853   return llvm::GlobalValue::WeakODRLinkage;
2854 }
2855 
/// Return (creating on first use) the thread wrapper function for \p VD.
/// The wrapper returns a pointer to the variable (for a reference variable,
/// a pointer to the referenced object); its body is emitted later by
/// EmitThreadLocalInitFuncs via the ThreadWrappers list.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper's return type is "pointer to the (non-reference) type".
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  // Weak-for-linker wrappers are merged across TUs through a comdat when the
  // target supports them.
  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers use the CXX_FAST_TLS convention and never unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Record the wrapper so its body can be emitted once all initializers for
  // this TU are known.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
2904 
/// Emit everything needed for dynamic TLS initialization in this TU: the
/// guarded __tls_init function for ordered initializers, a per-variable
/// thread_local init symbol (alias, weak declaration, or empty stub), and a
/// body for every thread wrapper recorded in ThreadWrappers.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations get unordered initialization (keyed by the
    // canonical decl); everything else runs in order via __tls_init.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    // __tls_guard is a thread-local byte used to make __tls_init run at most
    // once per thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper.  This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // No init call needed in the wrapper body at all.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (instantiated) variables alias their own per-variable init
      // function rather than the TU-wide __tls_init.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols.  However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null.  If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Now emit the wrapper body: run (or skip) the dynamic initialization,
    // then return the address of the variable.
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          // Propagate the calling convention to the aliased init function.
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized.  Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence.  This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
    }
    // Adjust pointer type/address space to match the declared return type.
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");

    Builder.CreateRet(Val);
  }
}
3110 
3111 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3112                                                    const VarDecl *VD,
3113                                                    QualType LValType) {
3114   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3115   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3116 
3117   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3118   CallVal->setCallingConv(Wrapper->getCallingConv());
3119 
3120   LValue LV;
3121   if (VD->getType()->isReferenceType())
3122     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3123   else
3124     LV = CGF.MakeAddrLValue(CallVal, LValType,
3125                             CGF.getContext().getDeclAlign(VD));
3126   // FIXME: need setObjCGCLValueClass?
3127   return LV;
3128 }
3129 
3130 /// Return whether the given global decl needs a VTT parameter, which it does
3131 /// if it's a base constructor or destructor with virtual bases.
3132 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3133   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3134 
3135   // We don't have any virtual bases, just return early.
3136   if (!MD->getParent()->getNumVBases())
3137     return false;
3138 
3139   // Check if we have a base constructor.
3140   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3141     return true;
3142 
3143   // Check if we have a base destructor.
3144   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3145     return true;
3146 
3147   return false;
3148 }
3149 
namespace {
/// ItaniumRTTIBuilder - Builds Itanium C++ ABI RTTI descriptors (type_info
/// objects), accumulating the descriptor's constant fields in order.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
3250 
3251 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3252     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3253   SmallString<256> Name;
3254   llvm::raw_svector_ostream Out(Name);
3255   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3256 
3257   // We know that the mangled name of the type starts at index 4 of the
3258   // mangled name of the typename, so we can just index into it in order to
3259   // get the mangled name of the type.
3260   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3261                                                             Name.substr(4));
3262   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3263 
3264   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3265       Name, Init->getType(), Linkage, Align.getAsAlign());
3266 
3267   GV->setInitializer(Init);
3268 
3269   return GV;
3270 }
3271 
/// Return the address of the _ZTI* descriptor for \p Ty, declared as an
/// external symbol expected to be defined in another object/DSO.
llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  if (!GV) {
    // Create a new global variable.
    // Note for the future: If we would ever like to do deferred emission of
    // RTTI, check if emitting vtables opportunistically need any adjustment.

    GV = new llvm::GlobalVariable(
        CGM.getModule(), CGM.GlobalsInt8PtrTy,
        /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
    const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
    CGM.setGVProperties(GV, RD);
    // Import the typeinfo symbol when all non-inline virtual methods are
    // imported.
    if (CGM.getTarget().hasPS4DLLImportExport()) {
      if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
        GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
        CGM.setDSOLocal(GV);
      }
    }
  }

  return GV;
}
3304 
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  //
  // GCC also emits RTTI for __int128.
  // FIXME: We do not emit RTTI information for decimal types here.

  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
  switch (Ty->getKind()) {
    // Fundamental types whose type_info the runtime library provides.
    case BuiltinType::Void:
    case BuiltinType::NullPtr:
    case BuiltinType::Bool:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char_U:
    case BuiltinType::Char_S:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::Half:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Float16:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      return true;

    // Target-extension and fixed-point types: the runtime provides no
    // type_info for these, so they must be emitted locally.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define SVE_TYPE(Name, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
    case BuiltinType::BFloat16:
      return false;

    // Placeholder types should have been resolved before RTTI emission.
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("asking for RRTI for a placeholder type!");

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("FIXME: Objective-C types are unsupported!");
  }

  llvm_unreachable("Invalid BuiltinType Kind!");
}
3417 
3418 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3419   QualType PointeeTy = PointerTy->getPointeeType();
3420   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3421   if (!BuiltinTy)
3422     return false;
3423 
3424   // Check the qualifiers.
3425   Qualifiers Quals = PointeeTy.getQualifiers();
3426   Quals.removeConst();
3427 
3428   if (!Quals.empty())
3429     return false;
3430 
3431   return TypeInfoIsInStandardLibrary(BuiltinTy);
3432 }
3433 
3434 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3435 /// information for the given type exists in the standard library.
3436 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3437   // Type info for builtin types is defined in the standard library.
3438   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3439     return TypeInfoIsInStandardLibrary(BuiltinTy);
3440 
3441   // Type info for some pointer types to builtin types is defined in the
3442   // standard library.
3443   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3444     return TypeInfoIsInStandardLibrary(PointerTy);
3445 
3446   return false;
3447 }
3448 
3449 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3450 /// the given type exists somewhere else, and that we should not emit the type
3451 /// information in this translation unit.  Assumes that it is not a
3452 /// standard-library type.
3453 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3454                                             QualType Ty) {
3455   ASTContext &Context = CGM.getContext();
3456 
3457   // If RTTI is disabled, assume it might be disabled in the
3458   // translation unit that defines any potential key function, too.
3459   if (!Context.getLangOpts().RTTI) return false;
3460 
3461   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3462     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3463     if (!RD->hasDefinition())
3464       return false;
3465 
3466     if (!RD->isDynamicClass())
3467       return false;
3468 
3469     // FIXME: this may need to be reconsidered if the key function
3470     // changes.
3471     // N.B. We must always emit the RTTI data ourselves if there exists a key
3472     // function.
3473     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3474 
3475     // Don't import the RTTI but emit it locally.
3476     if (CGM.getTriple().isWindowsGNUEnvironment())
3477       return false;
3478 
3479     if (CGM.getVTables().isVTableExternal(RD)) {
3480       if (CGM.getTarget().hasPS4DLLImportExport())
3481         return true;
3482 
3483       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3484                  ? false
3485                  : true;
3486     }
3487     if (IsDLLImport)
3488       return true;
3489   }
3490 
3491   return false;
3492 }
3493 
3494 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3495 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3496   return !RecordTy->getDecl()->isCompleteDefinition();
3497 }
3498 
3499 /// ContainsIncompleteClassType - Returns whether the given type contains an
3500 /// incomplete class type. This is true if
3501 ///
3502 ///   * The given type is an incomplete class type.
3503 ///   * The given type is a pointer type whose pointee type contains an
3504 ///     incomplete class type.
3505 ///   * The given type is a member pointer type whose class is an incomplete
3506 ///     class type.
3507 ///   * The given type is a member pointer type whoise pointee type contains an
3508 ///     incomplete class type.
3509 /// is an indirect or direct pointer to an incomplete class type.
3510 static bool ContainsIncompleteClassType(QualType Ty) {
3511   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3512     if (IsIncompleteClassType(RecordTy))
3513       return true;
3514   }
3515 
3516   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3517     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3518 
3519   if (const MemberPointerType *MemberPointerTy =
3520       dyn_cast<MemberPointerType>(Ty)) {
3521     // Check if the class type is incomplete.
3522     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3523     if (IsIncompleteClassType(ClassType))
3524       return true;
3525 
3526     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3527   }
3528 
3529   return false;
3530 }
3531 
3532 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3533 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3534 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3535 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3536   // Check the number of bases.
3537   if (RD->getNumBases() != 1)
3538     return false;
3539 
3540   // Get the base.
3541   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3542 
3543   // Check that the base is not virtual.
3544   if (Base->isVirtual())
3545     return false;
3546 
3547   // Check that the base is public.
3548   if (Base->getAccessSpecifier() != AS_public)
3549     return false;
3550 
3551   // Check that the class is dynamic iff the base is.
3552   auto *BaseDecl =
3553       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3554   if (!BaseDecl->isEmpty() &&
3555       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3556     return false;
3557 
3558   return true;
3559 }
3560 
/// BuildVTablePointer - Push the first field of a type_info object: a
/// pointer to the address point of the vtable of the __cxxabiv1 RTTI
/// class whose layout matches the shape of \p Ty.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // Mangled vtable names of the three class-type RTTI classes.
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  // Mangled name of the vtable global we will reference; selected below.
  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
  // Expand the TypeNodes list so that every non-canonical or dependent
  // type class funnels into the unreachable below.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  case Type::BitInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Classes pick the RTTI class that matches their inheritance shape:
    // no (visible) bases -> __class_type_info, a single public non-virtual
    // base -> __si_class_type_info, anything else -> __vmi_class_type_info.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    [[fallthrough]];

  case Type::ObjCInterface:
    // Interfaces with a superclass are single inheritance.
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable)
    VTable =
        CGM.getModule().getOrInsertGlobal(VTableName, CGM.GlobalsInt8PtrTy);

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // Advance the vtable pointer to its address point.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    // In the standard layout the address point is two pointer-widths in.
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
                                                          VTable, Two);
  }

  Fields.push_back(VTable);
}
3703 
/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  switch (Ty->getLinkage()) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    // Types that are not visible outside this translation unit get
    // internal type_info objects.
    return llvm::GlobalValue::InternalLinkage;

  case VisibleNoLinkage:
  case ModuleLinkage:
  case ExternalLinkage:
    // RTTI is not enabled, which means that this type info struct is going
    // to be used for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      // [[gnu::weak]] classes get weak_odr type info.
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      // On Windows Itanium, dllimported classes whose RTTI descriptor is
      // provided externally keep external linkage here.
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      if (RD->isDynamicClass() &&
          !CGM.getContext()
               .getTargetInfo()
               .getTriple()
               .isWindowsGNUEnvironment())
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}
3756 
/// BuildTypeInfo - Get or build the RTTI descriptor for \p Ty, computing
/// the linkage, visibility, and DLL storage class it should receive before
/// delegating to the four-argument overload.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  // A defined (non-declaration) global with this mangled name is a
  // previously emitted descriptor; reuse it.
  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    return OldGV;
  }

  // Check if there is already an external RTTI descriptor for this type.
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // Compute the linkage for the type_info object and its name.
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
           ItaniumCXXABI::RUK_NonUniqueHidden)
    // Non-unique RTTI that need not be published is demoted to hidden.
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());

  // Export the descriptor when the class is dllexported on Windows Itanium,
  // or when visibility is being mapped to DLL export for an exportable
  // (non-local, default-visibility) symbol.
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      llvm::GlobalValue::DefaultStorageClass;
  if (auto RD = Ty->getAsCXXRecordDecl()) {
    if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
         RD->hasAttr<DLLExportAttr>()) ||
        (CGM.shouldMapVisibilityToDLLExport(RD) &&
         !llvm::GlobalValue::isLocalLinkage(Linkage) &&
         llvmVisibility == llvm::GlobalValue::DefaultVisibility))
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
  }
  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}
3806 
/// BuildTypeInfo - Build an RTTI descriptor for \p Ty with the given
/// linkage, visibility, and DLL storage class, replacing any pre-existing
/// declaration with the same mangled name.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy);
  } else {
    TypeNameField = TypeName;
  }
  Fields.push_back(TypeNameField);

  // Emit the ABI-specified trailing fields for this kind of type_info.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    // abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::BitInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    // Single inheritance gets an __si_class_type_info; anything with a
    // more complex base structure gets a __vmi_class_type_info.
    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  // Re-derive the mangled RTTI name so the new global gets the right name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // Export the typeinfo in the same circumstances as the vtable is exported.
  auto GVDLLStorageClass = DLLStorageClass;
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (RD->hasAttr<DLLExportAttr>() ||
          CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
      }
    }
  }

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  // Weakly-emitted typeinfos must be deduplicated across TUs via COMDATs.
  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  CharUnits Align = CGM.getContext().toCharUnitsFromBits(
      CGM.getTarget().getPointerAlign(CGM.GetGlobalVarAddressSpace(nullptr)));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
                             ? GVDLLStorageClass
                             : DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return GV;
}
3997 
3998 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3999 /// for the given Objective-C object type.
4000 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
4001   // Drop qualifiers.
4002   const Type *T = OT->getBaseType().getTypePtr();
4003   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
4004 
4005   // The builtin types are abi::__class_type_infos and don't require
4006   // extra fields.
4007   if (isa<BuiltinType>(T)) return;
4008 
4009   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
4010   ObjCInterfaceDecl *Super = Class->getSuperClass();
4011 
4012   // Root classes are also __class_type_info.
4013   if (!Super) return;
4014 
4015   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
4016 
4017   // Everything else is single inheritance.
4018   llvm::Constant *BaseTypeInfo =
4019       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
4020   Fields.push_back(BaseTypeInfo);
4021 }
4022 
4023 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4024 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
4025 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4026   // Itanium C++ ABI 2.9.5p6b:
4027   // It adds to abi::__class_type_info a single member pointing to the
4028   // type_info structure for the base type,
4029   llvm::Constant *BaseTypeInfo =
4030     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
4031   Fields.push_back(BaseTypeInfo);
4032 }
4033 
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    // Non-virtual bases encountered so far during the traversal.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    // Virtual bases encountered so far during the traversal.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
4042 
4043 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
4044 /// abi::__vmi_class_type_info.
4045 ///
4046 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
4047                                              SeenBases &Bases) {
4048 
4049   unsigned Flags = 0;
4050 
4051   auto *BaseDecl =
4052       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
4053 
4054   if (Base->isVirtual()) {
4055     // Mark the virtual base as seen.
4056     if (!Bases.VirtualBases.insert(BaseDecl).second) {
4057       // If this virtual base has been seen before, then the class is diamond
4058       // shaped.
4059       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
4060     } else {
4061       if (Bases.NonVirtualBases.count(BaseDecl))
4062         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4063     }
4064   } else {
4065     // Mark the non-virtual base as seen.
4066     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
4067       // If this non-virtual base has been seen before, then the class has non-
4068       // diamond shaped repeated inheritance.
4069       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4070     } else {
4071       if (Bases.VirtualBases.count(BaseDecl))
4072         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4073     }
4074   }
4075 
4076   // Walk all bases.
4077   for (const auto &I : BaseDecl->bases())
4078     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4079 
4080   return Flags;
4081 }
4082 
4083 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4084   unsigned Flags = 0;
4085   SeenBases Bases;
4086 
4087   // Walk all bases.
4088   for (const auto &I : RD->bases())
4089     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4090 
4091   return Flags;
4092 }
4093 
4094 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4095 /// classes with bases that do not satisfy the abi::__si_class_type_info
4096 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4097 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4098   llvm::Type *UnsignedIntLTy =
4099     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4100 
4101   // Itanium C++ ABI 2.9.5p6c:
4102   //   __flags is a word with flags describing details about the class
4103   //   structure, which may be referenced by using the __flags_masks
4104   //   enumeration. These flags refer to both direct and indirect bases.
4105   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4106   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4107 
4108   // Itanium C++ ABI 2.9.5p6c:
4109   //   __base_count is a word with the number of direct proper base class
4110   //   descriptions that follow.
4111   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4112 
4113   if (!RD->getNumBases())
4114     return;
4115 
4116   // Now add the base class descriptions.
4117 
4118   // Itanium C++ ABI 2.9.5p6c:
4119   //   __base_info[] is an array of base class descriptions -- one for every
4120   //   direct proper base. Each description is of the type:
4121   //
4122   //   struct abi::__base_class_type_info {
4123   //   public:
4124   //     const __class_type_info *__base_type;
4125   //     long __offset_flags;
4126   //
4127   //     enum __offset_flags_masks {
4128   //       __virtual_mask = 0x1,
4129   //       __public_mask = 0x2,
4130   //       __offset_shift = 8
4131   //     };
4132   //   };
4133 
4134   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4135   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4136   // LLP64 platforms.
4137   // FIXME: Consider updating libc++abi to match, and extend this logic to all
4138   // LLP64 platforms.
4139   QualType OffsetFlagsTy = CGM.getContext().LongTy;
4140   const TargetInfo &TI = CGM.getContext().getTargetInfo();
4141   if (TI.getTriple().isOSCygMing() &&
4142       TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
4143     OffsetFlagsTy = CGM.getContext().LongLongTy;
4144   llvm::Type *OffsetFlagsLTy =
4145       CGM.getTypes().ConvertType(OffsetFlagsTy);
4146 
4147   for (const auto &Base : RD->bases()) {
4148     // The __base_type member points to the RTTI for the base type.
4149     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4150 
4151     auto *BaseDecl =
4152         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4153 
4154     int64_t OffsetFlags = 0;
4155 
4156     // All but the lower 8 bits of __offset_flags are a signed offset.
4157     // For a non-virtual base, this is the offset in the object of the base
4158     // subobject. For a virtual base, this is the offset in the virtual table of
4159     // the virtual base offset for the virtual base referenced (negative).
4160     CharUnits Offset;
4161     if (Base.isVirtual())
4162       Offset =
4163         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4164     else {
4165       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4166       Offset = Layout.getBaseClassOffset(BaseDecl);
4167     };
4168 
4169     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4170 
4171     // The low-order byte of __offset_flags contains flags, as given by the
4172     // masks from the enumeration __offset_flags_masks.
4173     if (Base.isVirtual())
4174       OffsetFlags |= BCTI_Virtual;
4175     if (Base.getAccessSpecifier() == AS_public)
4176       OffsetFlags |= BCTI_Public;
4177 
4178     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4179   }
4180 }
4181 
4182 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4183 /// pieces from \p Type.
4184 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4185   unsigned Flags = 0;
4186 
4187   if (Type.isConstQualified())
4188     Flags |= ItaniumRTTIBuilder::PTI_Const;
4189   if (Type.isVolatileQualified())
4190     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4191   if (Type.isRestrictQualified())
4192     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4193   Type = Type.getUnqualifiedType();
4194 
4195   // Itanium C++ ABI 2.9.5p7:
4196   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
4197   //   incomplete class type, the incomplete target type flag is set.
4198   if (ContainsIncompleteClassType(Type))
4199     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4200 
4201   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4202     if (Proto->isNothrow()) {
4203       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4204       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4205     }
4206   }
4207 
4208   return Flags;
4209 }
4210 
4211 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4212 /// used for pointer types.
4213 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4214   // Itanium C++ ABI 2.9.5p7:
4215   //   __flags is a flag word describing the cv-qualification and other
4216   //   attributes of the type pointed to
4217   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4218 
4219   llvm::Type *UnsignedIntLTy =
4220     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4221   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4222 
4223   // Itanium C++ ABI 2.9.5p7:
4224   //  __pointee is a pointer to the std::type_info derivation for the
4225   //  unqualified type being pointed to.
4226   llvm::Constant *PointeeTypeInfo =
4227       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4228   Fields.push_back(PointeeTypeInfo);
4229 }
4230 
4231 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4232 /// struct, used for member pointer types.
4233 void
4234 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4235   QualType PointeeTy = Ty->getPointeeType();
4236 
4237   // Itanium C++ ABI 2.9.5p7:
4238   //   __flags is a flag word describing the cv-qualification and other
4239   //   attributes of the type pointed to.
4240   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4241 
4242   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4243   if (IsIncompleteClassType(ClassType))
4244     Flags |= PTI_ContainingClassIncomplete;
4245 
4246   llvm::Type *UnsignedIntLTy =
4247     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4248   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4249 
4250   // Itanium C++ ABI 2.9.5p7:
4251   //   __pointee is a pointer to the std::type_info derivation for the
4252   //   unqualified type being pointed to.
4253   llvm::Constant *PointeeTypeInfo =
4254       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4255   Fields.push_back(PointeeTypeInfo);
4256 
4257   // Itanium C++ ABI 2.9.5p9:
4258   //   __context is a pointer to an abi::__class_type_info corresponding to the
4259   //   class type containing the member pointed to
4260   //   (e.g., the "A" in "int A::*").
4261   Fields.push_back(
4262       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4263 }
4264 
4265 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4266   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4267 }
4268 
4269 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4270   // Types added here must also be added to TypeInfoIsInStandardLibrary.
4271   QualType FundamentalTypes[] = {
4272       getContext().VoidTy,             getContext().NullPtrTy,
4273       getContext().BoolTy,             getContext().WCharTy,
4274       getContext().CharTy,             getContext().UnsignedCharTy,
4275       getContext().SignedCharTy,       getContext().ShortTy,
4276       getContext().UnsignedShortTy,    getContext().IntTy,
4277       getContext().UnsignedIntTy,      getContext().LongTy,
4278       getContext().UnsignedLongTy,     getContext().LongLongTy,
4279       getContext().UnsignedLongLongTy, getContext().Int128Ty,
4280       getContext().UnsignedInt128Ty,   getContext().HalfTy,
4281       getContext().FloatTy,            getContext().DoubleTy,
4282       getContext().LongDoubleTy,       getContext().Float128Ty,
4283       getContext().Char8Ty,            getContext().Char16Ty,
4284       getContext().Char32Ty
4285   };
4286   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4287       RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4288           ? llvm::GlobalValue::DLLExportStorageClass
4289           : llvm::GlobalValue::DefaultStorageClass;
4290   llvm::GlobalValue::VisibilityTypes Visibility =
4291       CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4292   for (const QualType &FundamentalType : FundamentalTypes) {
4293     QualType PointerType = getContext().getPointerType(FundamentalType);
4294     QualType PointerTypeConst = getContext().getPointerType(
4295         FundamentalType.withConst());
4296     for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4297       ItaniumRTTIBuilder(*this).BuildTypeInfo(
4298           Type, llvm::GlobalValue::ExternalLinkage,
4299           Visibility, DLLStorageClass);
4300   }
4301 }
4302 
4303 /// What sort of uniqueness rules should we use for the RTTI for the
4304 /// given type?
4305 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4306     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4307   if (shouldRTTIBeUnique())
4308     return RUK_Unique;
4309 
4310   // It's only necessary for linkonce_odr or weak_odr linkage.
4311   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4312       Linkage != llvm::GlobalValue::WeakODRLinkage)
4313     return RUK_Unique;
4314 
4315   // It's only necessary with default visibility.
4316   if (CanTy->getVisibility() != DefaultVisibility)
4317     return RUK_Unique;
4318 
4319   // If we're not required to publish this symbol, hide it.
4320   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4321     return RUK_NonUniqueHidden;
4322 
4323   // If we're required to publish this symbol, as we might be under an
4324   // explicit instantiation, leave it with default visibility but
4325   // enable string-comparisons.
4326   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4327   return RUK_NonUniqueVisible;
4328 }
4329 
// Find out how to codegen the complete destructor and constructor
namespace {
// Strategy for emitting the complete-object structor variant relative to
// the base-object variant:
//  - Emit:   emit a separate definition for each variant.
//  - RAUW:   emit no complete-object definition; replace all uses of its
//            symbol with the base-object one.
//  - Alias:  emit the complete-object symbol as an alias of the base one.
//  - COMDAT: emit both variants into a shared comdat group.
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
4334 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4335                                        const CXXMethodDecl *MD) {
4336   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4337     return StructorCodegen::Emit;
4338 
4339   // The complete and base structors are not equivalent if there are any virtual
4340   // bases, so emit separate functions.
4341   if (MD->getParent()->getNumVBases())
4342     return StructorCodegen::Emit;
4343 
4344   GlobalDecl AliasDecl;
4345   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4346     AliasDecl = GlobalDecl(DD, Dtor_Complete);
4347   } else {
4348     const auto *CD = cast<CXXConstructorDecl>(MD);
4349     AliasDecl = GlobalDecl(CD, Ctor_Complete);
4350   }
4351   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4352 
4353   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4354     return StructorCodegen::RAUW;
4355 
4356   // FIXME: Should we allow available_externally aliases?
4357   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4358     return StructorCodegen::RAUW;
4359 
4360   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4361     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4362     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4363         CGM.getTarget().getTriple().isOSBinFormatWasm())
4364       return StructorCodegen::COMDAT;
4365     return StructorCodegen::Emit;
4366   }
4367 
4368   return StructorCodegen::Alias;
4369 }
4370 
/// Emit the structor variant \p AliasDecl as an IR alias of the variant
/// \p TargetDecl. If a declaration with the alias's mangled name already
/// exists, its uses are redirected to the new alias; if a definition already
/// exists, nothing is done.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  // A real definition already exists under this name; leave it alone.
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    // Steal the declaration's name, repoint its uses at the alias, then
    // drop the now-unused declaration. This order matters: takeName must
    // happen before the declaration is erased.
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}
4403 
/// Emit the constructor or destructor variant identified by \p GD,
/// folding the complete variant into the base variant where the ABI and
/// codegen options allow it (alias, RAUW, or shared COMDAT group).
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  // Exactly one of CD/DD is non-null: a structor is a ctor or a dtor.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // For the complete variant, we may be able to avoid emitting a separate
  // definition and reuse the base variant instead.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    // Alias/COMDAT: the complete variant becomes an alias of the base
    // variant (for COMDAT, the group is attached when the base variant is
    // emitted below).
    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    // RAUW: emit nothing for the complete variant; record that uses of its
    // mangled name should be replaced by the base variant's address.
    if (CGType == StructorCodegen::RAUW) {
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  // (Skipped for COMDAT, where the group must contain the base-variant
  // definition itself.)
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Both variants share one COMDAT group named with the unified
    // ctor/dtor (C5/D5) mangling.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}
4468 
4469 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4470   // void *__cxa_begin_catch(void*);
4471   llvm::FunctionType *FTy = llvm::FunctionType::get(
4472       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4473 
4474   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4475 }
4476 
4477 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4478   // void __cxa_end_catch();
4479   llvm::FunctionType *FTy =
4480       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4481 
4482   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4483 }
4484 
4485 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4486   // void *__cxa_get_exception_ptr(void*);
4487   llvm::FunctionType *FTy = llvm::FunctionType::get(
4488       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4489 
4490   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4491 }
4492 
4493 namespace {
4494   /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4495   /// exception type lets us state definitively that the thrown exception
4496   /// type does not have a destructor.  In particular:
4497   ///   - Catch-alls tell us nothing, so we have to conservatively
4498   ///     assume that the thrown exception might have a destructor.
4499   ///   - Catches by reference behave according to their base types.
4500   ///   - Catches of non-record types will only trigger for exceptions
4501   ///     of non-record types, which never have destructors.
4502   ///   - Catches of record types can trigger for arbitrary subclasses
4503   ///     of the caught type, so we have to assume the actual thrown
4504   ///     exception type might have a throwing destructor, even if the
4505   ///     caught type's destructor is trivial or nothrow.
4506   struct CallEndCatch final : EHScopeStack::Cleanup {
4507     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4508     bool MightThrow;
4509 
4510     void Emit(CodeGenFunction &CGF, Flags flags) override {
4511       if (!MightThrow) {
4512         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4513         return;
4514       }
4515 
4516       CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4517     }
4518   };
4519 }
4520 
4521 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4522 /// __cxa_end_catch.
4523 ///
4524 /// \param EndMightThrow - true if __cxa_end_catch might throw
4525 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4526                                    llvm::Value *Exn,
4527                                    bool EndMightThrow) {
4528   llvm::CallInst *call =
4529     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4530 
4531   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4532 
4533   return call;
4534 }
4535 
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// Calls __cxa_begin_catch (via CallBeginCatch, which also enters the
/// matching __cxa_end_catch cleanup) and initializes the catch variable
/// at \p ParamAddr from the in-flight exception object, using the copy
/// strategy appropriate to the catch type (by-reference, scalar/complex,
/// or aggregate copy-construction).
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    // Only record types can have a destructor that end-catch must run.
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    // Store the (possibly adjusted) pointer as the reference's value.
    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    // Non-record types never have destructors, so end-catch cannot throw.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      // Under ARC, honor the ownership qualifier of the catch variable.
      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        [[fallthrough]];

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy =
      llvm::PointerType::getUnqual(CGF.getLLVMContext()); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    // Record types may have throwing destructors, so end-catch might throw.
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        LLVMCatchTy, caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
4714 
4715 /// Begins a catch statement by initializing the catch variable and
4716 /// calling __cxa_begin_catch.
4717 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4718                                    const CXXCatchStmt *S) {
4719   // We have to be very careful with the ordering of cleanups here:
4720   //   C++ [except.throw]p4:
4721   //     The destruction [of the exception temporary] occurs
4722   //     immediately after the destruction of the object declared in
4723   //     the exception-declaration in the handler.
4724   //
4725   // So the precise ordering is:
4726   //   1.  Construct catch variable.
4727   //   2.  __cxa_begin_catch
4728   //   3.  Enter __cxa_end_catch cleanup
4729   //   4.  Enter dtor cleanup
4730   //
4731   // We do this by using a slightly abnormal initialization process.
4732   // Delegation sequence:
4733   //   - ExitCXXTryStmt opens a RunCleanupsScope
4734   //     - EmitAutoVarAlloca creates the variable and debug info
4735   //       - InitCatchParam initializes the variable from the exception
4736   //       - CallBeginCatch calls __cxa_begin_catch
4737   //       - CallBeginCatch enters the __cxa_end_catch cleanup
4738   //     - EmitAutoVarCleanups enters the variable destructor cleanup
4739   //   - EmitCXXTryStmt emits the code for the catch body
4740   //   - EmitCXXTryStmt close the RunCleanupsScope
4741 
4742   VarDecl *CatchParam = S->getExceptionDecl();
4743   if (!CatchParam) {
4744     llvm::Value *Exn = CGF.getExceptionFromSlot();
4745     CallBeginCatch(CGF, Exn, true);
4746     return;
4747   }
4748 
4749   // Emit the local.
4750   CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4751   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4752   CGF.EmitAutoVarCleanups(var);
4753 }
4754 
/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
///
/// The helper marks the violating exception as handled by calling
/// __cxa_begin_catch on it before calling std::terminate. If the function
/// already has a body (from a previous call), it is reused as-is.
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
  ASTContext &C = CGM.getContext();
  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      C.VoidTy, {C.getPointerType(C.CharTy)});
  llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
  llvm::Function *fn =
      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
  // Only emit the body once; an empty function means we just created it.
  if (fn->empty()) {
    CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, fn);
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely.  The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }
  return fnRef;
}
4809 
4810 llvm::CallInst *
4811 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4812                                                    llvm::Value *Exn) {
4813   // In C++, we want to call __cxa_begin_catch() before terminating.
4814   if (Exn) {
4815     assert(CGF.CGM.getLangOpts().CPlusPlus);
4816     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4817   }
4818   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4819 }
4820 
4821 std::pair<llvm::Value *, const CXXRecordDecl *>
4822 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4823                              const CXXRecordDecl *RD) {
4824   return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4825 }
4826 
4827 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4828                                        const CXXCatchStmt *C) {
4829   if (CGF.getTarget().hasFeature("exception-handling"))
4830     CGF.EHStack.pushCleanup<CatchRetScope>(
4831         NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4832   ItaniumCXXABI::emitBeginCatch(CGF, C);
4833 }
4834 
llvm::CallInst *
WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                       llvm::Value *Exn) {
  // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
  // the violating exception to mark it handled, but it is currently hard to do
  // with wasm EH instruction structure with catch/catch_all, we just call
  // std::terminate and ignore the violating exception as in CGCXXABI.
  // TODO Consider code transformation that makes calling __clang_call_terminate
  // possible.
  // Note: Exn is deliberately ignored here; the generic CGCXXABI path calls
  // std::terminate without entering the handler.
  return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
}
4846 
4847 /// Register a global destructor as best as we know how.
4848 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4849                                   llvm::FunctionCallee Dtor,
4850                                   llvm::Constant *Addr) {
4851   if (D.getTLSKind() != VarDecl::TLS_None) {
4852     llvm::PointerType *PtrTy =
4853         llvm::PointerType::getUnqual(CGF.getLLVMContext());
4854 
4855     // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4856     llvm::FunctionType *AtExitTy =
4857         llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);
4858 
4859     // Fetch the actual function.
4860     llvm::FunctionCallee AtExit =
4861         CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4862 
4863     // Create __dtor function for the var decl.
4864     llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4865 
4866     // Register above __dtor with atexit().
4867     // First param is flags and must be 0, second param is function ptr
4868     llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4869     CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4870 
4871     // Cannot unregister TLS __dtor so done
4872     return;
4873   }
4874 
4875   // Create __dtor function for the var decl.
4876   llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4877 
4878   // Register above __dtor with atexit().
4879   CGF.registerGlobalDtorWithAtExit(DtorStub);
4880 
4881   // Emit __finalize function to unregister __dtor and (as appropriate) call
4882   // __dtor.
4883   emitCXXStermFinalizer(D, DtorStub, Addr);
4884 }
4885 
/// Emit a "sterm finalizer" for the variable \p D: a void() function that
/// unregisters \p dtorStub via unatexit() and, if the stub's cleanup was
/// still pending, calls it. The finalizer is then registered with the
/// module so it runs during static termination.
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  // Mangle the finalizer's name from the variable.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList(), D.getLocation(),
                    D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // the unatexit returns a value of 0, meaning that the cleanup is still
  // pending (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock, otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  // Register the finalizer with the appropriate priority/ordering bucket.
  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             IPA->getPriority());
  } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
  }
}
4947