1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 //  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 //  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
38 
39 using namespace clang;
40 using namespace CodeGen;
41 
42 namespace {
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44   /// VTables - All the vtables which have been defined.
45   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46 
47   /// All the thread wrapper functions that have been used.
48   llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49       ThreadWrappers;
50 
51 protected:
52   bool UseARMMethodPtrABI;
53   bool UseARMGuardVarABI;
54   bool Use32BitVTableOffsetABI;
55 
56   ItaniumMangleContext &getMangleContext() {
57     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58   }
59 
60 public:
61   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62                 bool UseARMMethodPtrABI = false,
63                 bool UseARMGuardVarABI = false) :
64     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65     UseARMGuardVarABI(UseARMGuardVarABI),
66     Use32BitVTableOffsetABI(false) { }
67 
68   bool classifyReturnType(CGFunctionInfo &FI) const override;
69 
70   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71     // If C++ prohibits us from making a copy, pass by address.
72     if (!RD->canPassInRegisters())
73       return RAA_Indirect;
74     return RAA_Default;
75   }
76 
77   bool isThisCompleteObject(GlobalDecl GD) const override {
78     // The Itanium ABI has separate complete-object vs.  base-object
79     // variants of both constructors and destructors.
80     if (isa<CXXDestructorDecl>(GD.getDecl())) {
81       switch (GD.getDtorType()) {
82       case Dtor_Complete:
83       case Dtor_Deleting:
84         return true;
85 
86       case Dtor_Base:
87         return false;
88 
89       case Dtor_Comdat:
90         llvm_unreachable("emitting dtor comdat as function?");
91       }
92       llvm_unreachable("bad dtor kind");
93     }
94     if (isa<CXXConstructorDecl>(GD.getDecl())) {
95       switch (GD.getCtorType()) {
96       case Ctor_Complete:
97         return true;
98 
99       case Ctor_Base:
100         return false;
101 
102       case Ctor_CopyingClosure:
103       case Ctor_DefaultClosure:
104         llvm_unreachable("closure ctors in Itanium ABI?");
105 
106       case Ctor_Comdat:
107         llvm_unreachable("emitting ctor comdat as function?");
108       }
109       llvm_unreachable("bad dtor kind");
110     }
111 
112     // No other kinds.
113     return false;
114   }
115 
116   bool isZeroInitializable(const MemberPointerType *MPT) override;
117 
118   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119 
120   CGCallee
121     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122                                     const Expr *E,
123                                     Address This,
124                                     llvm::Value *&ThisPtrForCall,
125                                     llvm::Value *MemFnPtr,
126                                     const MemberPointerType *MPT) override;
127 
128   llvm::Value *
129     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130                                  Address Base,
131                                  llvm::Value *MemPtr,
132                                  const MemberPointerType *MPT) override;
133 
134   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135                                            const CastExpr *E,
136                                            llvm::Value *Src) override;
137   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138                                               llvm::Constant *Src) override;
139 
140   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141 
142   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144                                         CharUnits offset) override;
145   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147                                      CharUnits ThisAdjustment);
148 
149   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150                                            llvm::Value *L, llvm::Value *R,
151                                            const MemberPointerType *MPT,
152                                            bool Inequality) override;
153 
154   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155                                          llvm::Value *Addr,
156                                          const MemberPointerType *MPT) override;
157 
158   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159                                Address Ptr, QualType ElementType,
160                                const CXXDestructorDecl *Dtor) override;
161 
162   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164 
165   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166 
167   llvm::CallInst *
168   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169                                       llvm::Value *Exn) override;
170 
171   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173   CatchTypeInfo
174   getAddrOfCXXCatchHandlerType(QualType Ty,
175                                QualType CatchHandlerType) override {
176     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177   }
178 
179   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182                           Address ThisPtr,
183                           llvm::Type *StdTypeInfoPtrTy) override;
184 
185   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186                                           QualType SrcRecordTy) override;
187 
188   llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189                                    QualType SrcRecordTy, QualType DestTy,
190                                    QualType DestRecordTy,
191                                    llvm::BasicBlock *CastEnd) override;
192 
193   llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194                                      QualType SrcRecordTy,
195                                      QualType DestTy) override;
196 
197   bool EmitBadCastCall(CodeGenFunction &CGF) override;
198 
199   llvm::Value *
200     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201                               const CXXRecordDecl *ClassDecl,
202                               const CXXRecordDecl *BaseClassDecl) override;
203 
204   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205 
206   AddedStructorArgCounts
207   buildStructorSignature(GlobalDecl GD,
208                          SmallVectorImpl<CanQualType> &ArgTys) override;
209 
210   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211                               CXXDtorType DT) const override {
212     // Itanium does not emit any destructor variant as an inline thunk.
213     // Delegating may occur as an optimization, but all variants are either
214     // emitted with external linkage or as linkonce if they are inline and used.
215     return false;
216   }
217 
218   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219 
220   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221                                  FunctionArgList &Params) override;
222 
223   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224 
225   AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226                                                const CXXConstructorDecl *D,
227                                                CXXCtorType Type,
228                                                bool ForVirtualBase,
229                                                bool Delegating) override;
230 
231   llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232                                              const CXXDestructorDecl *DD,
233                                              CXXDtorType Type,
234                                              bool ForVirtualBase,
235                                              bool Delegating) override;
236 
237   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238                           CXXDtorType Type, bool ForVirtualBase,
239                           bool Delegating, Address This,
240                           QualType ThisTy) override;
241 
242   void emitVTableDefinitions(CodeGenVTables &CGVT,
243                              const CXXRecordDecl *RD) override;
244 
245   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246                                            CodeGenFunction::VPtr Vptr) override;
247 
248   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249     return true;
250   }
251 
252   llvm::Constant *
253   getVTableAddressPoint(BaseSubobject Base,
254                         const CXXRecordDecl *VTableClass) override;
255 
256   llvm::Value *getVTableAddressPointInStructor(
257       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259 
260   llvm::Value *getVTableAddressPointInStructorWithVTT(
261       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263 
264   llvm::Constant *
265   getVTableAddressPointForConstExpr(BaseSubobject Base,
266                                     const CXXRecordDecl *VTableClass) override;
267 
268   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269                                         CharUnits VPtrOffset) override;
270 
271   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272                                      Address This, llvm::Type *Ty,
273                                      SourceLocation Loc) override;
274 
275   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276                                          const CXXDestructorDecl *Dtor,
277                                          CXXDtorType DtorType, Address This,
278                                          DeleteOrMemberCallExpr E) override;
279 
280   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281 
282   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284 
285   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286                        bool ReturnAdjustment) override {
287     // Allow inlining of thunks by emitting them with available_externally
288     // linkage together with vtables when needed.
289     if (ForVTable && !Thunk->hasLocalLinkage())
290       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291     CGM.setGVProperties(Thunk, GD);
292   }
293 
294   bool exportThunk() override { return true; }
295 
296   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297                                      const ThisAdjustment &TA) override;
298 
299   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300                                        const ReturnAdjustment &RA) override;
301 
302   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303                               FunctionArgList &Args) const override {
304     assert(!Args.empty() && "expected the arglist to not be empty!");
305     return Args.size() - 1;
306   }
307 
308   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309   StringRef GetDeletedVirtualCallName() override
310     { return "__cxa_deleted_virtual"; }
311 
312   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313   Address InitializeArrayCookie(CodeGenFunction &CGF,
314                                 Address NewPtr,
315                                 llvm::Value *NumElements,
316                                 const CXXNewExpr *expr,
317                                 QualType ElementType) override;
318   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319                                    Address allocPtr,
320                                    CharUnits cookieSize) override;
321 
322   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323                        llvm::GlobalVariable *DeclPtr,
324                        bool PerformInit) override;
325   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326                           llvm::FunctionCallee dtor,
327                           llvm::Constant *addr) override;
328 
329   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330                                                 llvm::Value *Val);
331   void EmitThreadLocalInitFuncs(
332       CodeGenModule &CGM,
333       ArrayRef<const VarDecl *> CXXThreadLocals,
334       ArrayRef<llvm::Function *> CXXThreadLocalInits,
335       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336 
337   bool usesThreadWrapperFunction(const VarDecl *VD) const override {
338     return !isEmittedWithConstantInitializer(VD) ||
339            mayNeedDestruction(VD);
340   }
341   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
342                                       QualType LValType) override;
343 
344   bool NeedsVTTParameter(GlobalDecl GD) override;
345 
346   /**************************** RTTI Uniqueness ******************************/
347 
348 protected:
349   /// Returns true if the ABI requires RTTI type_info objects to be unique
350   /// across a program.
351   virtual bool shouldRTTIBeUnique() const { return true; }
352 
353 public:
354   /// What sort of unique-RTTI behavior should we use?
355   enum RTTIUniquenessKind {
356     /// We are guaranteeing, or need to guarantee, that the RTTI string
357     /// is unique.
358     RUK_Unique,
359 
360     /// We are not guaranteeing uniqueness for the RTTI string, so we
361     /// can demote to hidden visibility but must use string comparisons.
362     RUK_NonUniqueHidden,
363 
364     /// We are not guaranteeing uniqueness for the RTTI string, so we
365     /// have to use string comparisons, but we also have to emit it with
366     /// non-hidden visibility.
367     RUK_NonUniqueVisible
368   };
369 
370   /// Return the required visibility status for the given type and linkage in
371   /// the current ABI.
372   RTTIUniquenessKind
373   classifyRTTIUniqueness(QualType CanTy,
374                          llvm::GlobalValue::LinkageTypes Linkage) const;
375   friend class ItaniumRTTIBuilder;
376 
377   void emitCXXStructor(GlobalDecl GD) override;
378 
379   std::pair<llvm::Value *, const CXXRecordDecl *>
380   LoadVTablePtr(CodeGenFunction &CGF, Address This,
381                 const CXXRecordDecl *RD) override;
382 
383  private:
384    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
385      const auto &VtableLayout =
386          CGM.getItaniumVTableContext().getVTableLayout(RD);
387 
388      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
389        // Skip empty slot.
390        if (!VtableComponent.isUsedFunctionPointerKind())
391          continue;
392 
393        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
394        if (!Method->getCanonicalDecl()->isInlined())
395          continue;
396 
397        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
398        auto *Entry = CGM.GetGlobalValue(Name);
399        // This checks if virtual inline function has already been emitted.
400        // Note that it is possible that this inline function would be emitted
401        // after trying to emit vtable speculatively. Because of this we do
402        // an extra pass after emitting all deferred vtables to find and emit
403        // these vtables opportunistically.
404        if (!Entry || Entry->isDeclaration())
405          return true;
406      }
407      return false;
408   }
409 
410   bool isVTableHidden(const CXXRecordDecl *RD) const {
411     const auto &VtableLayout =
412             CGM.getItaniumVTableContext().getVTableLayout(RD);
413 
414     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
415       if (VtableComponent.isRTTIKind()) {
416         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
417         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
418           return true;
419       } else if (VtableComponent.isUsedFunctionPointerKind()) {
420         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
421         if (Method->getVisibility() == Visibility::HiddenVisibility &&
422             !Method->isDefined())
423           return true;
424       }
425     }
426     return false;
427   }
428 };
429 
430 class ARMCXXABI : public ItaniumCXXABI {
431 public:
432   ARMCXXABI(CodeGen::CodeGenModule &CGM) :
433     ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
434                   /*UseARMGuardVarABI=*/true) {}
435 
436   bool constructorsAndDestructorsReturnThis() const override { return true; }
437 
438   void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
439                            QualType ResTy) override;
440 
441   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
442   Address InitializeArrayCookie(CodeGenFunction &CGF,
443                                 Address NewPtr,
444                                 llvm::Value *NumElements,
445                                 const CXXNewExpr *expr,
446                                 QualType ElementType) override;
447   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
448                                    CharUnits cookieSize) override;
449 };
450 
451 class AppleARM64CXXABI : public ARMCXXABI {
452 public:
453   AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
454     Use32BitVTableOffsetABI = true;
455   }
456 
457   // ARM64 libraries are prepared for non-unique RTTI.
458   bool shouldRTTIBeUnique() const override { return false; }
459 };
460 
461 class FuchsiaCXXABI final : public ItaniumCXXABI {
462 public:
463   explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
464       : ItaniumCXXABI(CGM) {}
465 
466 private:
467   bool constructorsAndDestructorsReturnThis() const override { return true; }
468 };
469 
470 class WebAssemblyCXXABI final : public ItaniumCXXABI {
471 public:
472   explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
473       : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
474                       /*UseARMGuardVarABI=*/true) {}
475   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
476   llvm::CallInst *
477   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
478                                       llvm::Value *Exn) override;
479 
480 private:
481   bool constructorsAndDestructorsReturnThis() const override { return true; }
482   bool canCallMismatchedFunctionType() const override { return false; }
483 };
484 
485 class XLCXXABI final : public ItaniumCXXABI {
486 public:
487   explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
488       : ItaniumCXXABI(CGM) {}
489 
490   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
491                           llvm::FunctionCallee dtor,
492                           llvm::Constant *addr) override;
493 
494   bool useSinitAndSterm() const override { return true; }
495 
496 private:
497   void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
498                              llvm::Constant *addr);
499 };
500 }
501 
502 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
503   switch (CGM.getContext().getCXXABIKind()) {
504   // For IR-generation purposes, there's no significant difference
505   // between the ARM and iOS ABIs.
506   case TargetCXXABI::GenericARM:
507   case TargetCXXABI::iOS:
508   case TargetCXXABI::WatchOS:
509     return new ARMCXXABI(CGM);
510 
511   case TargetCXXABI::AppleARM64:
512     return new AppleARM64CXXABI(CGM);
513 
514   case TargetCXXABI::Fuchsia:
515     return new FuchsiaCXXABI(CGM);
516 
517   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
518   // include the other 32-bit ARM oddities: constructor/destructor return values
519   // and array cookies.
520   case TargetCXXABI::GenericAArch64:
521     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
522                              /*UseARMGuardVarABI=*/true);
523 
524   case TargetCXXABI::GenericMIPS:
525     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
526 
527   case TargetCXXABI::WebAssembly:
528     return new WebAssemblyCXXABI(CGM);
529 
530   case TargetCXXABI::XL:
531     return new XLCXXABI(CGM);
532 
533   case TargetCXXABI::GenericItanium:
534     if (CGM.getContext().getTargetInfo().getTriple().getArch()
535         == llvm::Triple::le32) {
536       // For PNaCl, use ARM-style method pointers so that PNaCl code
537       // does not assume anything about the alignment of function
538       // pointers.
539       return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
540     }
541     return new ItaniumCXXABI(CGM);
542 
543   case TargetCXXABI::Microsoft:
544     llvm_unreachable("Microsoft ABI is not Itanium-based");
545   }
546   llvm_unreachable("bad ABI kind");
547 }
548 
549 llvm::Type *
550 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
551   if (MPT->isMemberDataPointer())
552     return CGM.PtrDiffTy;
553   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
554 }
555 
556 /// In the Itanium and ARM ABIs, method pointers have the form:
557 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
558 ///
559 /// In the Itanium ABI:
560 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
561 ///  - the this-adjustment is (memptr.adj)
562 ///  - the virtual offset is (memptr.ptr - 1)
563 ///
564 /// In the ARM ABI:
565 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
566 ///  - the this-adjustment is (memptr.adj >> 1)
567 ///  - the virtual offset is (memptr.ptr)
568 /// ARM uses 'adj' for the virtual flag because Thumb functions
569 /// may be only single-byte aligned.
570 ///
571 /// If the member is virtual, the adjusted 'this' pointer points
572 /// to a vtable pointer from which the virtual offset is applied.
573 ///
574 /// If the member is non-virtual, memptr.ptr is the address of
575 /// the function to call.
576 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
577     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
578     llvm::Value *&ThisPtrForCall,
579     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
580   CGBuilderTy &Builder = CGF.Builder;
581 
582   const FunctionProtoType *FPT =
583     MPT->getPointeeType()->getAs<FunctionProtoType>();
584   auto *RD =
585       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
586 
587   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
588       CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
589 
590   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
591 
592   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
593   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
594   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
595 
596   // Extract memptr.adj, which is in the second field.
597   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
598 
599   // Compute the true adjustment.
600   llvm::Value *Adj = RawAdj;
601   if (UseARMMethodPtrABI)
602     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
603 
604   // Apply the adjustment and cast back to the original struct type
605   // for consistency.
606   llvm::Value *This = ThisAddr.getPointer();
607   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
608   Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
609   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
610   ThisPtrForCall = This;
611 
612   // Load the function pointer.
613   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
614 
615   // If the LSB in the function pointer is 1, the function pointer points to
616   // a virtual function.
617   llvm::Value *IsVirtual;
618   if (UseARMMethodPtrABI)
619     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
620   else
621     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
622   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
623   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
624 
625   // In the virtual path, the adjustment left 'This' pointing to the
626   // vtable of the correct base subobject.  The "function pointer" is an
627   // offset within the vtable (+1 for the virtual flag on non-ARM).
628   CGF.EmitBlock(FnVirtual);
629 
630   // Cast the adjusted this to a pointer to vtable pointer and load.
631   llvm::Type *VTableTy = Builder.getInt8PtrTy();
632   CharUnits VTablePtrAlign =
633     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
634                                       CGF.getPointerAlign());
635   llvm::Value *VTable = CGF.GetVTablePtr(
636       Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
637 
638   // Apply the offset.
639   // On ARM64, to reserve extra space in virtual member function pointers,
640   // we only pay attention to the low 32 bits of the offset.
641   llvm::Value *VTableOffset = FnAsInt;
642   if (!UseARMMethodPtrABI)
643     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
644   if (Use32BitVTableOffsetABI) {
645     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
646     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
647   }
648 
649   // Check the address of the function pointer if CFI on member function
650   // pointers is enabled.
651   llvm::Constant *CheckSourceLocation;
652   llvm::Constant *CheckTypeDesc;
653   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
654                             CGM.HasHiddenLTOVisibility(RD);
655   bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
656                            CGM.HasHiddenLTOVisibility(RD);
657   bool ShouldEmitWPDInfo =
658       CGM.getCodeGenOpts().WholeProgramVTables &&
659       // Don't insert type tests if we are forcing public visibility.
660       !CGM.AlwaysHasLTOVisibilityPublic(RD);
661   llvm::Value *VirtualFn = nullptr;
662 
663   {
664     CodeGenFunction::SanitizerScope SanScope(&CGF);
665     llvm::Value *TypeId = nullptr;
666     llvm::Value *CheckResult = nullptr;
667 
668     if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
669       // If doing CFI, VFE or WPD, we will need the metadata node to check
670       // against.
671       llvm::Metadata *MD =
672           CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
673       TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
674     }
675 
676     if (ShouldEmitVFEInfo) {
677       llvm::Value *VFPAddr =
678           Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
679 
680       // If doing VFE, load from the vtable with a type.checked.load intrinsic
681       // call. Note that we use the GEP to calculate the address to load from
682       // and pass 0 as the offset to the intrinsic. This is because every
683       // vtable slot of the correct type is marked with matching metadata, and
684       // we know that the load must be from one of these slots.
685       llvm::Value *CheckedLoad = Builder.CreateCall(
686           CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
687           {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
688       CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
689       VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
690       VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
691                                         "memptr.virtualfn");
692     } else {
693       // When not doing VFE, emit a normal load, as it allows more
694       // optimisations than type.checked.load.
695       if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
696         llvm::Value *VFPAddr =
697             Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
698         llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
699                                       ? llvm::Intrinsic::type_test
700                                       : llvm::Intrinsic::public_type_test;
701 
702         CheckResult = Builder.CreateCall(
703             CGM.getIntrinsic(IID),
704             {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
705       }
706 
707       if (CGM.getItaniumVTableContext().isRelativeLayout()) {
708         VirtualFn = CGF.Builder.CreateCall(
709             CGM.getIntrinsic(llvm::Intrinsic::load_relative,
710                              {VTableOffset->getType()}),
711             {VTable, VTableOffset});
712         VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
713       } else {
714         llvm::Value *VFPAddr =
715             CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
716         VFPAddr = CGF.Builder.CreateBitCast(
717             VFPAddr, FTy->getPointerTo()->getPointerTo());
718         VirtualFn = CGF.Builder.CreateAlignedLoad(
719             FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
720             "memptr.virtualfn");
721       }
722     }
723     assert(VirtualFn && "Virtual fuction pointer not created!");
724     assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
725             CheckResult) &&
726            "Check result required but not created!");
727 
728     if (ShouldEmitCFICheck) {
729       // If doing CFI, emit the check.
730       CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
731       CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
732       llvm::Constant *StaticData[] = {
733           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
734           CheckSourceLocation,
735           CheckTypeDesc,
736       };
737 
738       if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
739         CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
740       } else {
741         llvm::Value *AllVtables = llvm::MetadataAsValue::get(
742             CGM.getLLVMContext(),
743             llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
744         llvm::Value *ValidVtable = Builder.CreateCall(
745             CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
746         CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
747                       SanitizerHandler::CFICheckFail, StaticData,
748                       {VTable, ValidVtable});
749       }
750 
751       FnVirtual = Builder.GetInsertBlock();
752     }
753   } // End of sanitizer scope
754 
755   CGF.EmitBranch(FnEnd);
756 
757   // In the non-virtual path, the function pointer is actually a
758   // function pointer.
759   CGF.EmitBlock(FnNonVirtual);
760   llvm::Value *NonVirtualFn =
761     Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
762 
763   // Check the function pointer if CFI on member function pointers is enabled.
764   if (ShouldEmitCFICheck) {
765     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
766     if (RD->hasDefinition()) {
767       CodeGenFunction::SanitizerScope SanScope(&CGF);
768 
769       llvm::Constant *StaticData[] = {
770           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
771           CheckSourceLocation,
772           CheckTypeDesc,
773       };
774 
775       llvm::Value *Bit = Builder.getFalse();
776       llvm::Value *CastedNonVirtualFn =
777           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
778       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
779         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
780             getContext().getMemberPointerType(
781                 MPT->getPointeeType(),
782                 getContext().getRecordType(Base).getTypePtr()));
783         llvm::Value *TypeId =
784             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
785 
786         llvm::Value *TypeTest =
787             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
788                                {CastedNonVirtualFn, TypeId});
789         Bit = Builder.CreateOr(Bit, TypeTest);
790       }
791 
792       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
793                     SanitizerHandler::CFICheckFail, StaticData,
794                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
795 
796       FnNonVirtual = Builder.GetInsertBlock();
797     }
798   }
799 
800   // We're done.
801   CGF.EmitBlock(FnEnd);
802   llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
803   CalleePtr->addIncoming(VirtualFn, FnVirtual);
804   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
805 
806   CGCallee Callee(FPT, CalleePtr);
807   return Callee;
808 }
809 
810 /// Compute an l-value by applying the given pointer-to-member to a
811 /// base object.
812 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
813     CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
814     const MemberPointerType *MPT) {
815   assert(MemPtr->getType() == CGM.PtrDiffTy);
816 
817   CGBuilderTy &Builder = CGF.Builder;
818 
819   // Cast to char*.
820   Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
821 
822   // Apply the offset, which we assume is non-null.
823   llvm::Value *Addr = Builder.CreateInBoundsGEP(
824       Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
825 
826   // Cast the address to the appropriate pointer type, adopting the
827   // address space of the base pointer.
828   llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
829                             ->getPointerTo(Base.getAddressSpace());
830   return Builder.CreateBitCast(Addr, PType);
831 }
832 
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased;  and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  // If there is no adjustment to apply, the conversion is a no-op.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.  The null member data pointer (-1) must survive the
    // conversion unchanged, so select the original value when null.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Member function pointers adjust the 'adj' field (element 1 of the
  // {ptr, adj} pair); the 'ptr' field passes through unchanged.
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}
911 
912 llvm::Constant *
913 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
914                                            llvm::Constant *src) {
915   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
916          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
917          E->getCastKind() == CK_ReinterpretMemberPointer);
918 
919   // Under Itanium, reinterprets don't require any additional processing.
920   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
921 
922   // If the adjustment is trivial, we don't need to do anything.
923   llvm::Constant *adj = getMemberPointerAdjustment(E);
924   if (!adj) return src;
925 
926   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
927 
928   const MemberPointerType *destTy =
929     E->getType()->castAs<MemberPointerType>();
930 
931   // For member data pointers, this is just a matter of adding the
932   // offset if the source is non-null.
933   if (destTy->isMemberDataPointer()) {
934     // null maps to null.
935     if (src->isAllOnesValue()) return src;
936 
937     if (isDerivedToBase)
938       return llvm::ConstantExpr::getNSWSub(src, adj);
939     else
940       return llvm::ConstantExpr::getNSWAdd(src, adj);
941   }
942 
943   // The this-adjustment is left-shifted by 1 on ARM.
944   if (UseARMMethodPtrABI) {
945     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
946     offset <<= 1;
947     adj = llvm::ConstantInt::get(adj->getType(), offset);
948   }
949 
950   llvm::Constant *srcAdj = src->getAggregateElement(1);
951   llvm::Constant *dstAdj;
952   if (isDerivedToBase)
953     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
954   else
955     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
956 
957   llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
958   assert(res != nullptr && "Folding must succeed");
959   return res;
960 }
961 
962 llvm::Constant *
963 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
964   // Itanium C++ ABI 2.3:
965   //   A NULL pointer is represented as -1.
966   if (MPT->isMemberDataPointer())
967     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
968 
969   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
970   llvm::Constant *Values[2] = { Zero, Zero };
971   return llvm::ConstantStruct::getAnon(Values);
972 }
973 
974 llvm::Constant *
975 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
976                                      CharUnits offset) {
977   // Itanium C++ ABI 2.3:
978   //   A pointer to data member is an offset from the base address of
979   //   the class object containing it, represented as a ptrdiff_t
980   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
981 }
982 
983 llvm::Constant *
984 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
985   return BuildMemberPointer(MD, CharUnits::Zero());
986 }
987 
/// Build the constant {ptr, adj} pair for a pointer to member function
/// MD, folding in the given this-adjustment (in bytes).
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  // MemPtr[0] becomes the 'ptr' field and MemPtr[1] the 'adj' field.
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      // Classic layout: each vtable slot is pointer-sized.
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(LangAS::Default));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    // Non-virtual: 'ptr' is the function address; 'adj' is the
    // this-adjustment, doubled under the ARM variant (whose low bit is
    // the virtual flag, left clear here).
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
1050 
1051 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1052                                                  QualType MPType) {
1053   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1054   const ValueDecl *MPD = MP.getMemberPointerDecl();
1055   if (!MPD)
1056     return EmitNullMemberPointer(MPT);
1057 
1058   CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1059 
1060   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1061     return BuildMemberPointer(MD, ThisAdjustment);
1062 
1063   CharUnits FieldOffset =
1064     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1065   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1066 }
1067 
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Choose the predicate and combining operators once up front.  For
  // inequality, the roles of And and Or are swapped (De Morgan), so
  // the same emission code below serves both comparisons.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1145 
1146 llvm::Value *
1147 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1148                                           llvm::Value *MemPtr,
1149                                           const MemberPointerType *MPT) {
1150   CGBuilderTy &Builder = CGF.Builder;
1151 
1152   /// For member data pointers, this is just a check against -1.
1153   if (MPT->isMemberDataPointer()) {
1154     assert(MemPtr->getType() == CGM.PtrDiffTy);
1155     llvm::Value *NegativeOne =
1156       llvm::Constant::getAllOnesValue(MemPtr->getType());
1157     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1158   }
1159 
1160   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1161   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1162 
1163   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1164   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1165 
1166   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1167   // (the virtual bit) is set.
1168   if (UseARMMethodPtrABI) {
1169     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1170     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1171     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1172     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1173                                                   "memptr.isvirtual");
1174     Result = Builder.CreateOr(Result, IsVirtual);
1175   }
1176 
1177   return Result;
1178 }
1179 
1180 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1181   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1182   if (!RD)
1183     return false;
1184 
1185   // If C++ prohibits us from making a copy, return by address.
1186   if (!RD->canPassInRegisters()) {
1187     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1188     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1189     return true;
1190   }
1191   return false;
1192 }
1193 
/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset; their null value
/// is -1.  Member function pointers use a zero 'ptr' field for null
/// (see EmitNullMemberPointer), so they are zero-initializable.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}
1199 
1200 /// The Itanium ABI always places an offset to the complete object
1201 /// at entry -2 in the vtable.
1202 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1203                                             const CXXDeleteExpr *DE,
1204                                             Address Ptr,
1205                                             QualType ElementType,
1206                                             const CXXDestructorDecl *Dtor) {
1207   bool UseGlobalDelete = DE->isGlobalDelete();
1208   if (UseGlobalDelete) {
1209     // Derive the complete-object pointer, which is what we need
1210     // to pass to the deallocation function.
1211 
1212     // Grab the vtable pointer as an intptr_t*.
1213     auto *ClassDecl =
1214         cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1215     llvm::Value *VTable =
1216         CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1217 
1218     // Track back to entry -2 and pull out the offset there.
1219     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1220         CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1221     llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,                                                        CGF.getPointerAlign());
1222 
1223     // Apply the offset.
1224     llvm::Value *CompletePtr =
1225       CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1226     CompletePtr =
1227         CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1228 
1229     // If we're supposed to call the global delete, make sure we do so
1230     // even if the destructor throws.
1231     CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1232                                     ElementType);
1233   }
1234 
1235   // FIXME: Provide a source location here even though there's no
1236   // CXXMemberCallExpr for dtor call.
1237   CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1238   EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1239 
1240   if (UseGlobalDelete)
1241     CGF.PopCleanupBlock();
1242 }
1243 
1244 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1245   // void __cxa_rethrow();
1246 
1247   llvm::FunctionType *FTy =
1248     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1249 
1250   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1251 
1252   if (isNoReturn)
1253     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
1254   else
1255     CGF.EmitRuntimeCallOrInvoke(Fn);
1256 }
1257 
1258 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1259   // void *__cxa_allocate_exception(size_t thrown_size);
1260 
1261   llvm::FunctionType *FTy =
1262     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1263 
1264   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1265 }
1266 
1267 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1268   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1269   //                  void (*dest) (void *));
1270 
1271   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1272   llvm::FunctionType *FTy =
1273     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1274 
1275   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1276 }
1277 
/// Emit a 'throw' expression: allocate the exception object, construct
/// the thrown value into it, and call __cxa_throw.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object via __cxa_allocate_exception,
  // sized for the thrown type.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Construct the thrown value directly into the exception storage.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(
      E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      // Pass the complete-object destructor, cast to void*.
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  // __cxa_throw never returns.
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
1312 
1313 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1314   // void *__dynamic_cast(const void *sub,
1315   //                      const abi::__class_type_info *src,
1316   //                      const abi::__class_type_info *dst,
1317   //                      std::ptrdiff_t src2dst_offset);
1318 
1319   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1320   llvm::Type *PtrDiffTy =
1321     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1322 
1323   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1324 
1325   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1326 
1327   // Mark the function as nounwind readonly.
1328   llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1329   FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1330   FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
1331   llvm::AttributeList Attrs = llvm::AttributeList::get(
1332       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1333 
1334   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1335 }
1336 
1337 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1338   // void __cxa_bad_cast();
1339   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1340   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1341 }
1342 
/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
///
/// Returns the offset of Src within Dst when Src is a unique public
/// non-virtual base, or one of the negative sentinels used below:
///   -1: no hint can be given (a virtual base is on the path);
///   -2: Src is not a public base of Dst;
///   -3: Src appears as a public base more than once, never virtually.
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
1394 
1395 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1396   // void __cxa_bad_typeid();
1397   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1398 
1399   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1400 }
1401 
/// Only 'typeid(*p)' needs a null check: per C++ [expr.typeid], a
/// dereference of a null pointer operand throws std::bad_typeid.
/// Other glvalue operands cannot be null.
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  return IsDeref;
}
1406 
1407 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1408   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1409   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1410   Call->setDoesNotReturn();
1411   CGF.Builder.CreateUnreachable();
1412 }
1413 
/// Emit the std::type_info* for 'typeid(expr)' by reading it out of the
/// operand's vtable.
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  // Load the operand's vtable pointer.
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.  Under the relative layout, a 32-bit relative
    // reference to the type info sits 4 bytes before the address point.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info.  In the classic layout the type info pointer
    // occupies the slot immediately before the vtable's address point.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
1440 
/// A pointer dynamic_cast must null-check its operand before calling
/// the runtime; a reference operand cannot be null.
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}
1445 
/// Emit a call to the __dynamic_cast runtime function, plus the
/// bad_cast path required for the reference form of the cast.
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  // RTTI descriptors for the static source and destination class types.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.  See computeOffsetHint for the meaning of
  // its negative sentinel values.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    // A null result from __dynamic_cast signals failure; for the
    // reference form, branch to a block that emits the bad_cast call.
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1488 
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  // dynamic_cast<void*>: locate the most-derived object by adding the
  // vtable's offset-to-top entry to the source pointer. No runtime call
  // is needed.
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    // In the relative layout it is a 32-bit integer two slots before the
    // address point.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    // In the classic layout it is a ptrdiff_t two slots before the
    // address point.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}
1527 
1528 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1529   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1530   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1531   Call->setDoesNotReturn();
1532   CGF.Builder.CreateUnreachable();
1533   return true;
1534 }
1535 
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  // Load the offset of a virtual base from the vbase-offset slot in the
  // object's vtable. The slot's position relative to the address point is
  // provided by the vtable context.
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(
        CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
        "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores vbase offsets as 32-bit integers.
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    // Classic layout stores vbase offsets as ptrdiff_t.
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
1565 
1566 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1567   // Just make sure we're in sync with TargetCXXABI.
1568   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1569 
1570   // The constructor used for constructing this as a base class;
1571   // ignores virtual bases.
1572   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1573 
1574   // The constructor used for constructing this as a complete class;
1575   // constructs the virtual bases, then calls the base constructor.
1576   if (!D->getParent()->isAbstract()) {
1577     // We don't need to emit the complete ctor if the class is abstract.
1578     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1579   }
1580 }
1581 
1582 CGCXXABI::AddedStructorArgCounts
1583 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1584                                       SmallVectorImpl<CanQualType> &ArgTys) {
1585   ASTContext &Context = getContext();
1586 
1587   // All parameters are already in place except VTT, which goes after 'this'.
1588   // These are Clang types, so we don't need to worry about sret yet.
1589 
1590   // Check if we need to add a VTT parameter (which has type void **).
1591   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1592                                              : GD.getDtorType() == Dtor_Base) &&
1593       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1594     ArgTys.insert(ArgTys.begin() + 1,
1595                   Context.getPointerType(Context.VoidPtrTy));
1596     return AddedStructorArgCounts::prefix(1);
1597   }
1598   return AddedStructorArgCounts{};
1599 }
1600 
1601 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1602   // The destructor used for destructing this as a base class; ignores
1603   // virtual bases.
1604   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1605 
1606   // The destructor used for destructing this as a most-derived class;
1607   // call the base destructor and then destructs any virtual bases.
1608   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1609 
1610   // The destructor in a virtual table is always a 'deleting'
1611   // destructor, which calls the complete destructor and then uses the
1612   // appropriate operator delete.
1613   if (D->isVirtual())
1614     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1615 }
1616 
1617 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1618                                               QualType &ResTy,
1619                                               FunctionArgList &Params) {
1620   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1621   assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1622 
1623   // Check if we need a VTT parameter as well.
1624   if (NeedsVTTParameter(CGF.CurGD)) {
1625     ASTContext &Context = getContext();
1626 
1627     // FIXME: avoid the fake decl
1628     QualType T = Context.getPointerType(Context.VoidPtrTy);
1629     auto *VTTDecl = ImplicitParamDecl::Create(
1630         Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1631         T, ImplicitParamDecl::CXXVTT);
1632     Params.insert(Params.begin() + 1, VTTDecl);
1633     getStructorImplicitParamDecl(CGF) = VTTDecl;
1634   }
1635 }
1636 
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Set up the implicit state ('this', VTT, this-return slot) at the
  // start of an instance method body.

  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
1663 
1664 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1665     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1666     bool ForVirtualBase, bool Delegating) {
1667   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1668     return AddedStructorArgs{};
1669 
1670   // Insert the implicit 'vtt' argument as the second argument.
1671   llvm::Value *VTT =
1672       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1673   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1674   return AddedStructorArgs::prefix({{VTT, VTTTy}});
1675 }
1676 
1677 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1678     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1679     bool ForVirtualBase, bool Delegating) {
1680   GlobalDecl GD(DD, Type);
1681   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1682 }
1683 
1684 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1685                                        const CXXDestructorDecl *DD,
1686                                        CXXDtorType Type, bool ForVirtualBase,
1687                                        bool Delegating, Address This,
1688                                        QualType ThisTy) {
1689   GlobalDecl GD(DD, Type);
1690   llvm::Value *VTT =
1691       getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1692   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1693 
1694   CGCallee Callee;
1695   if (getContext().getLangOpts().AppleKext &&
1696       Type != Dtor_Base && DD->isVirtual())
1697     Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1698   else
1699     Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1700 
1701   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1702                             nullptr);
1703 }
1704 
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  // Emit the vtable (group) definition for RD: build its initializer,
  // fix up linkage/visibility/comdat, and attach type metadata.
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  // Already emitted; nothing to do.
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // Relative-layout vtables contain offsets rather than pointers; strip
  // hwasan tagging and, for non-dso-local tables, emit a local alias so
  // references can stay PC-relative.
  if (VTContext.isRelativeLayout()) {
    CGVT.RemoveHwasanMetadata(VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
  }
}
1767 
1768 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1769     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1770   if (Vptr.NearestVBase == nullptr)
1771     return false;
1772   return NeedsVTTParameter(CGF.CurGD);
1773 }
1774 
1775 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1776     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1777     const CXXRecordDecl *NearestVBase) {
1778 
1779   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1780       NeedsVTTParameter(CGF.CurGD)) {
1781     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1782                                                   NearestVBase);
1783   }
1784   return getVTableAddressPoint(Base, VTableClass);
1785 }
1786 
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  // Return a constant GEP pointing at the address point for the given
  // base subobject within VTableClass's vtable group.
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // Indices: dereference the global, select the vtable within the group,
  // then select the address-point slot within that vtable.
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // InRangeIndex=1 tells LLVM that accesses through this GEP stay within
  // the selected vtable of the group, enabling vtable splitting.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}
1808 
1809 // Check whether all the non-inline virtual methods for the class have the
1810 // specified attribute.
1811 template <typename T>
1812 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1813   bool FoundNonInlineVirtualMethodWithAttr = false;
1814   for (const auto *D : RD->noload_decls()) {
1815     if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1816       if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1817           FD->doesThisDeclarationHaveABody())
1818         continue;
1819       if (!D->hasAttr<T>())
1820         return false;
1821       FoundNonInlineVirtualMethodWithAttr = true;
1822     }
1823   }
1824 
1825   // We didn't find any non-inline virtual methods missing the attribute.  We
1826   // will return true when we found at least one non-inline virtual with the
1827   // attribute.  (This lets our caller know that the attribute needs to be
1828   // propagated up to the vtable.)
1829   return FoundNonInlineVirtualMethodWithAttr;
1830 }
1831 
1832 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1833     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1834     const CXXRecordDecl *NearestVBase) {
1835   assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1836          NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1837 
1838   // Get the secondary vpointer index.
1839   uint64_t VirtualPointerIndex =
1840       CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1841 
1842   /// Load the VTT.
1843   llvm::Value *VTT = CGF.LoadCXXVTT();
1844   if (VirtualPointerIndex)
1845     VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
1846         CGF.VoidPtrTy, VTT, VirtualPointerIndex);
1847 
1848   // And load the address point from the VTT.
1849   return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
1850                                        CGF.getPointerAlign());
1851 }
1852 
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  // In the Itanium ABI the address point used in constant expressions is
  // the same constant GEP used everywhere else.
  return getVTableAddressPoint(Base, VTableClass);
}
1857 
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  // Get or create the (possibly uninitialized) global for RD's vtable
  // group, caching it in the VTables map.
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  // Relative-layout entries are 32-bit, hence the 32-bit alignment there.
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(LangAS::Default);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getAsAlign());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // In MS C++ if you have a class with virtual functions in which you are using
  // selective member import/export, then all virtual functions must be exported
  // unless they are inline, otherwise a link error will result. To match this
  // behavior, for such classes, we dllimport the vtable if it is defined
  // externally and all the non-inline virtual methods are marked dllimport, and
  // we dllexport the vtable if it is defined in this TU and all the non-inline
  // virtual methods are marked dllexport.
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
      if (CGM.getVTables().isVTableExternal(RD)) {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      } else {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      }
    }
  }
  CGM.setGVProperties(VTable, RD);

  return VTable;
}
1911 
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  // Load the function pointer for a virtual call to GD through the vtable
  // of 'This', honoring CFI checked loads and the relative vtable layout.
  llvm::Type *TyPtr = Ty->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, TyPtr->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: combine the type check and the load in one intrinsic. The byte
    // offset is the slot index scaled by the pointer width.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, TyPtr,
        VTableIndex *
            CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
            8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: slots are 4-byte relative offsets resolved via
      // llvm.load.relative.
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
    } else {
      // Classic layout: index into an array of function pointers.
      VTable =
          CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          TyPtr, VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
                                        CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
1971 
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  // Emit a virtual destructor call originating either from a delete
  // expression or from an explicit member call (p->~T()).
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  // Exactly one of the two expression kinds must be present, and an
  // explicit destructor call carries no arguments.
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  // Recover the static type of the object for the 'this' argument.
  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  // No VTT is passed: virtual destructor calls always target the
  // most-derived object.
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
                            QualType(), nullptr);
  return nullptr;
}
1998 
1999 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2000   CodeGenVTables &VTables = CGM.getVTables();
2001   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2002   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2003 }
2004 
2005 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2006     const CXXRecordDecl *RD) const {
2007   // We don't emit available_externally vtables if we are in -fapple-kext mode
2008   // because kext mode does not permit devirtualization.
2009   if (CGM.getLangOpts().AppleKext)
2010     return false;
2011 
2012   // If the vtable is hidden then it is not safe to emit an available_externally
2013   // copy of vtable.
2014   if (isVTableHidden(RD))
2015     return false;
2016 
2017   if (CGM.getCodeGenOpts().ForceEmitVTables)
2018     return true;
2019 
2020   // If we don't have any not emitted inline virtual function then we are safe
2021   // to emit an available_externally copy of vtable.
2022   // FIXME we can still emit a copy of the vtable if we
2023   // can emit definition of the inline functions.
2024   if (hasAnyUnusedVirtualInlineFunction(RD))
2025     return false;
2026 
2027   // For a class with virtual bases, we must also be able to speculatively
2028   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2029   // the vtable" and "can emit the VTT". For a base subobject, this means we
2030   // need to be able to emit non-virtual base vtables.
2031   if (RD->getNumVBases()) {
2032     for (const auto &B : RD->bases()) {
2033       auto *BRD = B.getType()->getAsCXXRecordDecl();
2034       assert(BRD && "no class for base specifier");
2035       if (B.isVirtual() || !BRD->isDynamicClass())
2036         continue;
2037       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2038         return false;
2039     }
2040   }
2041 
2042   return true;
2043 }
2044 
2045 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2046   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2047     return false;
2048 
2049   // For a complete-object vtable (or more specifically, for the VTT), we need
2050   // to be able to speculatively emit the vtables of all dynamic virtual bases.
2051   for (const auto &B : RD->vbases()) {
2052     auto *BRD = B.getType()->getAsCXXRecordDecl();
2053     assert(BRD && "no class for base specifier");
2054     if (!BRD->isDynamicClass())
2055       continue;
2056     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2057       return false;
2058   }
2059 
2060   return true;
2061 }
// Shared implementation for thunk 'this'/return adjustments: apply a
// non-virtual byte offset and/or a virtual offset loaded from the vtable.
// For 'this' adjustments the non-virtual part is applied first; for
// return adjustments it is applied last.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // Fast path: nothing to adjust.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    // The virtual adjustment offset lives in the vtable at byte offset
    // VirtualAdjustment from the address point.
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                        CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(
        V.getElementType(), V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
2122 
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  // Thunk 'this' adjustment: non-virtual offset first, then the vcall
  // offset loaded from the vtable (IsReturnAdjustment=false ordering).
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}
2130 
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  // Thunk return-value adjustment: virtual (vbase offset) part first, then
  // the non-virtual offset (IsReturnAdjustment=true ordering).
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}
2138 
2139 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2140                                     RValue RV, QualType ResultType) {
2141   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2142     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2143 
2144   // Destructor thunks in the ARM ABI have indeterminate results.
2145   llvm::Type *T = CGF.ReturnValue.getElementType();
2146   RValue Undef = RValue::get(llvm::UndefValue::get(T));
2147   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2148 }
2149 
2150 /************************** Array allocation cookies **************************/
2151 
2152 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2153   // The array cookie is a size_t; pad that up to the element alignment.
2154   // The cookie is actually right-justified in that space.
2155   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2156                   CGM.getContext().getPreferredTypeAlignInChars(elementType));
2157 }
2158 
2159 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2160                                              Address NewPtr,
2161                                              llvm::Value *NumElements,
2162                                              const CXXNewExpr *expr,
2163                                              QualType ElementType) {
2164   assert(requiresArrayCookie(expr));
2165 
2166   unsigned AS = NewPtr.getAddressSpace();
2167 
2168   ASTContext &Ctx = getContext();
2169   CharUnits SizeSize = CGF.getSizeSize();
2170 
2171   // The size of the cookie.
2172   CharUnits CookieSize =
2173       std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2174   assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2175 
2176   // Compute an offset to the cookie.
2177   Address CookiePtr = NewPtr;
2178   CharUnits CookieOffset = CookieSize - SizeSize;
2179   if (!CookieOffset.isZero())
2180     CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2181 
2182   // Write the number of elements into the appropriate slot.
2183   Address NumElementsPtr =
2184       CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2185   llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2186 
2187   // Handle the array cookie specially in ASan.
2188   if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2189       (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2190        CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2191     // The store to the CookiePtr does not need to be instrumented.
2192     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2193     llvm::FunctionType *FTy =
2194         llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2195     llvm::FunctionCallee F =
2196         CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2197     CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2198   }
2199 
2200   // Finally, compute a pointer to the actual data buffer by skipping
2201   // over the cookie completely.
2202   return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2203 }
2204 
2205 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2206                                                 Address allocPtr,
2207                                                 CharUnits cookieSize) {
2208   // The element size is right-justified in the cookie.
2209   Address numElementsPtr = allocPtr;
2210   CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2211   if (!numElementsOffset.isZero())
2212     numElementsPtr =
2213       CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2214 
2215   unsigned AS = allocPtr.getAddressSpace();
2216   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2217   if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2218     return CGF.Builder.CreateLoad(numElementsPtr);
2219   // In asan mode emit a function call instead of a regular load and let the
2220   // run-time deal with it: if the shadow is properly poisoned return the
2221   // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2222   // We can't simply ignore this load using nosanitize metadata because
2223   // the metadata may be lost.
2224   llvm::FunctionType *FTy =
2225       llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2226   llvm::FunctionCallee F =
2227       CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2228   return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2229 }
2230 
2231 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2232   // ARM says that the cookie is always:
2233   //   struct array_cookie {
2234   //     std::size_t element_size; // element_size != 0
2235   //     std::size_t element_count;
2236   //   };
2237   // But the base ABI doesn't give anything an alignment greater than
2238   // 8, so we can dismiss this as typical ABI-author blindness to
2239   // actual language complexity and round up to the element alignment.
2240   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2241                   CGM.getContext().getTypeAlignInChars(elementType));
2242 }
2243 
2244 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2245                                          Address newPtr,
2246                                          llvm::Value *numElements,
2247                                          const CXXNewExpr *expr,
2248                                          QualType elementType) {
2249   assert(requiresArrayCookie(expr));
2250 
2251   // The cookie is always at the start of the buffer.
2252   Address cookie = newPtr;
2253 
2254   // The first element is the element size.
2255   cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2256   llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2257                  getContext().getTypeSizeInChars(elementType).getQuantity());
2258   CGF.Builder.CreateStore(elementSize, cookie);
2259 
2260   // The second element is the element count.
2261   cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2262   CGF.Builder.CreateStore(numElements, cookie);
2263 
2264   // Finally, compute a pointer to the actual data buffer by skipping
2265   // over the cookie completely.
2266   CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2267   return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2268 }
2269 
2270 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2271                                             Address allocPtr,
2272                                             CharUnits cookieSize) {
2273   // The number of elements is at offset sizeof(size_t) relative to
2274   // the allocated pointer.
2275   Address numElementsPtr
2276     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2277 
2278   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2279   return CGF.Builder.CreateLoad(numElementsPtr);
2280 }
2281 
2282 /*********************** Static local initialization **************************/
2283 
2284 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2285                                               llvm::PointerType *GuardPtrTy) {
2286   // int __cxa_guard_acquire(__guard *guard_object);
2287   llvm::FunctionType *FTy =
2288     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2289                             GuardPtrTy, /*isVarArg=*/false);
2290   return CGM.CreateRuntimeFunction(
2291       FTy, "__cxa_guard_acquire",
2292       llvm::AttributeList::get(CGM.getLLVMContext(),
2293                                llvm::AttributeList::FunctionIndex,
2294                                llvm::Attribute::NoUnwind));
2295 }
2296 
2297 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2298                                               llvm::PointerType *GuardPtrTy) {
2299   // void __cxa_guard_release(__guard *guard_object);
2300   llvm::FunctionType *FTy =
2301     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2302   return CGM.CreateRuntimeFunction(
2303       FTy, "__cxa_guard_release",
2304       llvm::AttributeList::get(CGM.getLLVMContext(),
2305                                llvm::AttributeList::FunctionIndex,
2306                                llvm::Attribute::NoUnwind));
2307 }
2308 
2309 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2310                                             llvm::PointerType *GuardPtrTy) {
2311   // void __cxa_guard_abort(__guard *guard_object);
2312   llvm::FunctionType *FTy =
2313     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2314   return CGM.CreateRuntimeFunction(
2315       FTy, "__cxa_guard_abort",
2316       llvm::AttributeList::get(CGM.getLLVMContext(),
2317                                llvm::AttributeList::FunctionIndex,
2318                                llvm::Attribute::NoUnwind));
2319 }
2320 
namespace {
  /// Cleanup that calls __cxa_guard_abort on the given guard variable.
  /// Pushed along the exceptional edge of a guarded static initialization
  /// so that an exception thrown during the initializer resets the guard.
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // __cxa_guard_abort is declared nounwind, so a nounwind runtime call
      // is sufficient here.
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}
2332 
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
///
/// Emits the one-time-initialization sequence for the guarded variable \p D
/// backed by \p var: a guard variable is created (or reused), its first byte
/// (or, on ARM, bit 0) is tested, and the initializer is run under
/// __cxa_guard_acquire/release when thread-safe statics are in effect.
/// \param shouldPerformInit forwarded to EmitCXXGlobalVarDeclInit;
///        NOTE(review): its exact semantics live in that callee — confirm
///        there before relying on a particular meaning.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment =
          CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage, visibility and dll storage class  from the guarded
    // variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    guard->setDLLStorageClass(var->getDLLStorageClass());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    // Cache the guard so a re-emission of this function body reuses it.
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  //
  // If threadsafe statics are enabled, but we don't have inline atomics, just
  // call __cxa_guard_acquire unconditionally.  The "inline" check isn't
  // actually inline, and the user might not expect calls to __atomic libcalls.

  unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
  if (!threadsafe || MaxInlineWidthInBits) {
    // Load the first byte of the guard variable.
    llvm::LoadInst *LI =
        Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(llvm::AtomicOrdering::Acquire);

    // For ARM, we should only check the first bit, rather than the entire byte:
    //
    // ARM C++ ABI 3.2.3.1:
    //   To support the potential use of initialization guard variables
    //   as semaphores that are the target of ARM SWP and LDREX/STREX
    //   synchronizing instructions we define a static initialization
    //   guard variable to be a 4-byte aligned, 4-byte word with the
    //   following inline access protocol.
    //     #define INITIALIZED 1
    //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
    //       if (__cxa_guard_acquire(&obj_guard))
    //         ...
    //     }
    //
    // and similarly for ARM64:
    //
    // ARM64 C++ ABI 3.2.2:
    //   This ABI instead only specifies the value bit 0 of the static guard
    //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
    //   variable is not initialized and 1 when it is.
    llvm::Value *V =
        (UseARMGuardVarABI && !useInt8GuardVariable)
            ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
            : LI;
    llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

    llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");

    // Check if the first byte of the guard variable is zero.
    CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                                 CodeGenFunction::GuardKind::VariableGuard, &D);

    CGF.EmitBlock(InitCheckBlock);
  }

  // The semantics of dynamic initialization of variables with static or thread
  // storage duration depends on whether they are declared at block-scope. The
  // initialization of such variables at block-scope can be aborted with an
  // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
  // to their initialization has undefined behavior (also per C++20
  // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
  // lead to termination (per C++20 [except.terminate]p1), and recursive
  // references to the variables are governed only by the lifetime rules (per
  // C++20 [class.cdtor]p2), which means such references are perfectly fine as
  // long as they avoid touching memory. As a result, block-scope variables must
  // not be marked as initialized until after initialization completes (unless
  // the mark is reverted following an exception), but non-block-scope variables
  // must be marked prior to initialization so that recursive accesses during
  // initialization do not restart initialization.

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    // A nonzero acquire result means we own the initialization.
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  } else if (!D.isLocalVarDecl()) {
    // For non-local variables, store 1 into the first byte of the guard
    // variable before the object initialization begins so that references
    // to the variable during initialization don't restart initialization.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else if (D.isLocalVarDecl()) {
    // For local variables, store 1 into the first byte of the guard variable
    // after the object initialization completes so that initialization is
    // retried if initialization is interrupted by an exception.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}
2550 
/// Register a global destructor using __cxa_atexit.
///
/// \param dtor the destructor; it is bitcast to the void(void*) prototype
///        __cxa_atexit expects, on the assumption it is callable that way.
/// \param addr the object address passed back to \p dtor; may be null (a
///        null i8* is substituted) when registering a function annotated
///        with __attribute__((destructor)) from a constructor function.
/// \param TLS when true, registers via __cxa_thread_atexit (or _tlv_atexit
///        on Darwin) instead of __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    // Darwin spells the thread-local variant differently.
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrInt8PtrTy =
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
                         handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2605 
2606 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2607                                                    StringRef FnName) {
2608   // Create a function that registers/unregisters destructors that have the same
2609   // priority.
2610   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2611   llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2612       FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2613 
2614   return GlobalInitOrCleanupFn;
2615 }
2616 
2617 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2618   for (const auto &I : DtorsUsingAtExit) {
2619     int Priority = I.first;
2620     std::string GlobalCleanupFnName =
2621         std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
2622 
2623     llvm::Function *GlobalCleanupFn =
2624         createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
2625 
2626     CodeGenFunction CGF(*this);
2627     CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
2628                       getTypes().arrangeNullaryFunction(), FunctionArgList(),
2629                       SourceLocation(), SourceLocation());
2630     auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2631 
2632     // Get the destructor function type, void(*)(void).
2633     llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
2634     llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
2635 
2636     // Destructor functions are run/unregistered in non-ascending
2637     // order of their priorities.
2638     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2639     auto itv = Dtors.rbegin();
2640     while (itv != Dtors.rend()) {
2641       llvm::Function *Dtor = *itv;
2642 
2643       // We're assuming that the destructor function is something we can
2644       // reasonably call with the correct CC.  Go ahead and cast it to the
2645       // right prototype.
2646       llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
2647       llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
2648       llvm::Value *NeedsDestruct =
2649           CGF.Builder.CreateIsNull(V, "needs_destruct");
2650 
2651       llvm::BasicBlock *DestructCallBlock =
2652           CGF.createBasicBlock("destruct.call");
2653       llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2654           (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2655       // Check if unatexit returns a value of 0. If it does, jump to
2656       // DestructCallBlock, otherwise jump to EndBlock directly.
2657       CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
2658 
2659       CGF.EmitBlock(DestructCallBlock);
2660 
2661       // Emit the call to casted Dtor.
2662       llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
2663       // Make sure the call and the callee agree on calling convention.
2664       CI->setCallingConv(Dtor->getCallingConv());
2665 
2666       CGF.EmitBlock(EndBlock);
2667 
2668       itv++;
2669     }
2670 
2671     CGF.FinishFunction();
2672     AddGlobalDtor(GlobalCleanupFn, Priority);
2673   }
2674 }
2675 
2676 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2677   for (const auto &I : DtorsUsingAtExit) {
2678     int Priority = I.first;
2679     std::string GlobalInitFnName =
2680         std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2681     llvm::Function *GlobalInitFn =
2682         createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
2683 
2684     CodeGenFunction CGF(*this);
2685     CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
2686                       getTypes().arrangeNullaryFunction(), FunctionArgList(),
2687                       SourceLocation(), SourceLocation());
2688     auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2689 
2690     // Since constructor functions are run in non-descending order of their
2691     // priorities, destructors are registered in non-descending order of their
2692     // priorities, and since destructor functions are run in the reverse order
2693     // of their registration, destructor functions are run in non-ascending
2694     // order of their priorities.
2695     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2696     for (auto *Dtor : Dtors) {
2697       // Register the destructor function calling __cxa_atexit if it is
2698       // available. Otherwise fall back on calling atexit.
2699       if (getCodeGenOpts().CXAAtExit) {
2700         emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2701       } else {
2702         // Get the destructor function type, void(*)(void).
2703         llvm::Type *dtorTy =
2704             llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
2705 
2706         // We're assuming that the destructor function is something we can
2707         // reasonably call with the correct CC.  Go ahead and cast it to the
2708         // right prototype.
2709         CGF.registerGlobalDtorWithAtExit(
2710             llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
2711       }
2712     }
2713 
2714     CGF.FinishFunction();
2715     AddGlobalCtor(GlobalInitFn, Priority);
2716   }
2717 
2718   if (getCXXABI().useSinitAndSterm())
2719     unregisterGlobalDtorsWithUnAtExit();
2720 }
2721 
2722 /// Register a global destructor as best as we know how.
2723 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2724                                        llvm::FunctionCallee dtor,
2725                                        llvm::Constant *addr) {
2726   if (D.isNoDestroy(CGM.getContext()))
2727     return;
2728 
2729   // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2730   // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2731   // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2732   // We can always use __cxa_thread_atexit.
2733   if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2734     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2735 
2736   // In Apple kexts, we want to add a global destructor entry.
2737   // FIXME: shouldn't this be guarded by some variable?
2738   if (CGM.getLangOpts().AppleKext) {
2739     // Generate a global destructor entry.
2740     return CGM.AddCXXDtorEntry(dtor, addr);
2741   }
2742 
2743   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2744 }
2745 
2746 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2747                                        CodeGen::CodeGenModule &CGM) {
2748   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2749   // Darwin prefers to have references to thread local variables to go through
2750   // the thread wrapper instead of directly referencing the backing variable.
2751   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2752          CGM.getTarget().getTriple().isOSDarwin();
2753 }
2754 
2755 /// Get the appropriate linkage for the wrapper function. This is essentially
2756 /// the weak form of the variable's linkage; every translation unit which needs
2757 /// the wrapper emits a copy, and we want the linker to merge them.
2758 static llvm::GlobalValue::LinkageTypes
2759 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2760   llvm::GlobalValue::LinkageTypes VarLinkage =
2761       CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2762 
2763   // For internal linkage variables, we don't need an external or weak wrapper.
2764   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2765     return VarLinkage;
2766 
2767   // If the thread wrapper is replaceable, give it appropriate linkage.
2768   if (isThreadWrapperReplaceable(VD, CGM))
2769     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2770         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2771       return VarLinkage;
2772   return llvm::GlobalValue::WeakODRLinkage;
2773 }
2774 
/// Get or create the Itanium thread_local wrapper function for \p VD.
/// The wrapper's mangled name is derived from the variable; an existing
/// module-level function of that name is reused.  Newly created wrappers are
/// recorded in ThreadWrappers; NOTE(review): their bodies appear to be
/// emitted later from that list — confirm in EmitThreadLocalInitFuncs.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the variable; for a reference-typed
  // variable, strip the reference so the pointee type is used.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  // Weak-for-linker wrappers are deduplicated via a COMDAT where supported.
  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers use the CXX_FAST_TLS convention and never unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
2823 
// Emit the per-TU thread_local machinery: the guarded __tls_init function for
// ordered initializers, per-variable init functions/aliases, and the bodies of
// all thread wrapper functions recorded in ThreadWrappers.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations have unordered initialization; everything else
    // is initialized in declaration order via __tls_init.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    // __tls_guard is a thread-local byte that records whether this thread has
    // already run the ordered initializers.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper.  This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // Constant-initialized and destruction-free: the wrapper has nothing
      // dynamic to do.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiation) variables get their own init
      // function rather than the shared __tls_init.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols.  However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null.  If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Now emit the wrapper's body: run dynamic initialization (if any), then
    // return the variable's (thread-local) address.
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized.  Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence.  This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");

    Builder.CreateRet(Val);
  }
}
3029 
3030 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3031                                                    const VarDecl *VD,
3032                                                    QualType LValType) {
3033   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3034   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3035 
3036   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3037   CallVal->setCallingConv(Wrapper->getCallingConv());
3038 
3039   LValue LV;
3040   if (VD->getType()->isReferenceType())
3041     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3042   else
3043     LV = CGF.MakeAddrLValue(CallVal, LValType,
3044                             CGF.getContext().getDeclAlign(VD));
3045   // FIXME: need setObjCGCLValueClass?
3046   return LV;
3047 }
3048 
3049 /// Return whether the given global decl needs a VTT parameter, which it does
3050 /// if it's a base constructor or destructor with virtual bases.
3051 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3052   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3053 
3054   // We don't have any virtual bases, just return early.
3055   if (!MD->getParent()->getNumVBases())
3056     return false;
3057 
3058   // Check if we have a base constructor.
3059   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3060     return true;
3061 
3062   // Check if we have a base destructor.
3063   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3064     return true;
3065 
3066   return false;
3067 }
3068 
namespace {
/// ItaniumRTTIBuilder - Builds Itanium-ABI RTTI descriptors (type_info
/// objects) for a type, one field at a time.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
3169 
3170 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3171     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3172   SmallString<256> Name;
3173   llvm::raw_svector_ostream Out(Name);
3174   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3175 
3176   // We know that the mangled name of the type starts at index 4 of the
3177   // mangled name of the typename, so we can just index into it in order to
3178   // get the mangled name of the type.
3179   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3180                                                             Name.substr(4));
3181   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3182 
3183   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3184       Name, Init->getType(), Linkage, Align.getAsAlign());
3185 
3186   GV->setInitializer(Init);
3187 
3188   return GV;
3189 }
3190 
3191 llvm::Constant *
3192 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3193   // Mangle the RTTI name.
3194   SmallString<256> Name;
3195   llvm::raw_svector_ostream Out(Name);
3196   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3197 
3198   // Look for an existing global.
3199   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3200 
3201   if (!GV) {
3202     // Create a new global variable.
3203     // Note for the future: If we would ever like to do deferred emission of
3204     // RTTI, check if emitting vtables opportunistically need any adjustment.
3205 
3206     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3207                                   /*isConstant=*/true,
3208                                   llvm::GlobalValue::ExternalLinkage, nullptr,
3209                                   Name);
3210     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3211     CGM.setGVProperties(GV, RD);
3212     // Import the typeinfo symbol when all non-inline virtual methods are
3213     // imported.
3214     if (CGM.getTarget().hasPS4DLLImportExport()) {
3215       if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3216         GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3217         CGM.setDSOLocal(GV);
3218       }
3219     }
3220   }
3221 
3222   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3223 }
3224 
3225 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3226 /// info for that type is defined in the standard library.
3227 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3228   // Itanium C++ ABI 2.9.2:
3229   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
3230   //   the run-time support library. Specifically, the run-time support
3231   //   library should contain type_info objects for the types X, X* and
3232   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3233   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
3234   //   long, unsigned long, long long, unsigned long long, float, double,
3235   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
3236   //   half-precision floating point types.
3237   //
3238   // GCC also emits RTTI for __int128.
3239   // FIXME: We do not emit RTTI information for decimal types here.
3240 
3241   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3242   switch (Ty->getKind()) {
3243     case BuiltinType::Void:
3244     case BuiltinType::NullPtr:
3245     case BuiltinType::Bool:
3246     case BuiltinType::WChar_S:
3247     case BuiltinType::WChar_U:
3248     case BuiltinType::Char_U:
3249     case BuiltinType::Char_S:
3250     case BuiltinType::UChar:
3251     case BuiltinType::SChar:
3252     case BuiltinType::Short:
3253     case BuiltinType::UShort:
3254     case BuiltinType::Int:
3255     case BuiltinType::UInt:
3256     case BuiltinType::Long:
3257     case BuiltinType::ULong:
3258     case BuiltinType::LongLong:
3259     case BuiltinType::ULongLong:
3260     case BuiltinType::Half:
3261     case BuiltinType::Float:
3262     case BuiltinType::Double:
3263     case BuiltinType::LongDouble:
3264     case BuiltinType::Float16:
3265     case BuiltinType::Float128:
3266     case BuiltinType::Ibm128:
3267     case BuiltinType::Char8:
3268     case BuiltinType::Char16:
3269     case BuiltinType::Char32:
3270     case BuiltinType::Int128:
3271     case BuiltinType::UInt128:
3272       return true;
3273 
3274 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3275     case BuiltinType::Id:
3276 #include "clang/Basic/OpenCLImageTypes.def"
3277 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3278     case BuiltinType::Id:
3279 #include "clang/Basic/OpenCLExtensionTypes.def"
3280     case BuiltinType::OCLSampler:
3281     case BuiltinType::OCLEvent:
3282     case BuiltinType::OCLClkEvent:
3283     case BuiltinType::OCLQueue:
3284     case BuiltinType::OCLReserveID:
3285 #define SVE_TYPE(Name, Id, SingletonId) \
3286     case BuiltinType::Id:
3287 #include "clang/Basic/AArch64SVEACLETypes.def"
3288 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3289     case BuiltinType::Id:
3290 #include "clang/Basic/PPCTypes.def"
3291 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3292 #include "clang/Basic/RISCVVTypes.def"
3293     case BuiltinType::ShortAccum:
3294     case BuiltinType::Accum:
3295     case BuiltinType::LongAccum:
3296     case BuiltinType::UShortAccum:
3297     case BuiltinType::UAccum:
3298     case BuiltinType::ULongAccum:
3299     case BuiltinType::ShortFract:
3300     case BuiltinType::Fract:
3301     case BuiltinType::LongFract:
3302     case BuiltinType::UShortFract:
3303     case BuiltinType::UFract:
3304     case BuiltinType::ULongFract:
3305     case BuiltinType::SatShortAccum:
3306     case BuiltinType::SatAccum:
3307     case BuiltinType::SatLongAccum:
3308     case BuiltinType::SatUShortAccum:
3309     case BuiltinType::SatUAccum:
3310     case BuiltinType::SatULongAccum:
3311     case BuiltinType::SatShortFract:
3312     case BuiltinType::SatFract:
3313     case BuiltinType::SatLongFract:
3314     case BuiltinType::SatUShortFract:
3315     case BuiltinType::SatUFract:
3316     case BuiltinType::SatULongFract:
3317     case BuiltinType::BFloat16:
3318       return false;
3319 
3320     case BuiltinType::Dependent:
3321 #define BUILTIN_TYPE(Id, SingletonId)
3322 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3323     case BuiltinType::Id:
3324 #include "clang/AST/BuiltinTypes.def"
3325       llvm_unreachable("asking for RRTI for a placeholder type!");
3326 
3327     case BuiltinType::ObjCId:
3328     case BuiltinType::ObjCClass:
3329     case BuiltinType::ObjCSel:
3330       llvm_unreachable("FIXME: Objective-C types are unsupported!");
3331   }
3332 
3333   llvm_unreachable("Invalid BuiltinType Kind!");
3334 }
3335 
3336 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3337   QualType PointeeTy = PointerTy->getPointeeType();
3338   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3339   if (!BuiltinTy)
3340     return false;
3341 
3342   // Check the qualifiers.
3343   Qualifiers Quals = PointeeTy.getQualifiers();
3344   Quals.removeConst();
3345 
3346   if (!Quals.empty())
3347     return false;
3348 
3349   return TypeInfoIsInStandardLibrary(BuiltinTy);
3350 }
3351 
3352 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3353 /// information for the given type exists in the standard library.
3354 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3355   // Type info for builtin types is defined in the standard library.
3356   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3357     return TypeInfoIsInStandardLibrary(BuiltinTy);
3358 
3359   // Type info for some pointer types to builtin types is defined in the
3360   // standard library.
3361   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3362     return TypeInfoIsInStandardLibrary(PointerTy);
3363 
3364   return false;
3365 }
3366 
3367 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3368 /// the given type exists somewhere else, and that we should not emit the type
3369 /// information in this translation unit.  Assumes that it is not a
3370 /// standard-library type.
3371 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3372                                             QualType Ty) {
3373   ASTContext &Context = CGM.getContext();
3374 
3375   // If RTTI is disabled, assume it might be disabled in the
3376   // translation unit that defines any potential key function, too.
3377   if (!Context.getLangOpts().RTTI) return false;
3378 
3379   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3380     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3381     if (!RD->hasDefinition())
3382       return false;
3383 
3384     if (!RD->isDynamicClass())
3385       return false;
3386 
3387     // FIXME: this may need to be reconsidered if the key function
3388     // changes.
3389     // N.B. We must always emit the RTTI data ourselves if there exists a key
3390     // function.
3391     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3392 
3393     // Don't import the RTTI but emit it locally.
3394     if (CGM.getTriple().isWindowsGNUEnvironment())
3395       return false;
3396 
3397     if (CGM.getVTables().isVTableExternal(RD)) {
3398       if (CGM.getTarget().hasPS4DLLImportExport())
3399         return true;
3400 
3401       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3402                  ? false
3403                  : true;
3404     }
3405     if (IsDLLImport)
3406       return true;
3407   }
3408 
3409   return false;
3410 }
3411 
3412 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3413 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3414   return !RecordTy->getDecl()->isCompleteDefinition();
3415 }
3416 
3417 /// ContainsIncompleteClassType - Returns whether the given type contains an
3418 /// incomplete class type. This is true if
3419 ///
3420 ///   * The given type is an incomplete class type.
3421 ///   * The given type is a pointer type whose pointee type contains an
3422 ///     incomplete class type.
3423 ///   * The given type is a member pointer type whose class is an incomplete
3424 ///     class type.
3425 ///   * The given type is a member pointer type whoise pointee type contains an
3426 ///     incomplete class type.
3427 /// is an indirect or direct pointer to an incomplete class type.
3428 static bool ContainsIncompleteClassType(QualType Ty) {
3429   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3430     if (IsIncompleteClassType(RecordTy))
3431       return true;
3432   }
3433 
3434   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3435     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3436 
3437   if (const MemberPointerType *MemberPointerTy =
3438       dyn_cast<MemberPointerType>(Ty)) {
3439     // Check if the class type is incomplete.
3440     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3441     if (IsIncompleteClassType(ClassType))
3442       return true;
3443 
3444     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3445   }
3446 
3447   return false;
3448 }
3449 
3450 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3451 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3452 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3453 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3454   // Check the number of bases.
3455   if (RD->getNumBases() != 1)
3456     return false;
3457 
3458   // Get the base.
3459   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3460 
3461   // Check that the base is not virtual.
3462   if (Base->isVirtual())
3463     return false;
3464 
3465   // Check that the base is public.
3466   if (Base->getAccessSpecifier() != AS_public)
3467     return false;
3468 
3469   // Check that the class is dynamic iff the base is.
3470   auto *BaseDecl =
3471       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3472   if (!BaseDecl->isEmpty() &&
3473       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3474     return false;
3475 
3476   return true;
3477 }
3478 
3479 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3480   // abi::__class_type_info.
3481   static const char * const ClassTypeInfo =
3482     "_ZTVN10__cxxabiv117__class_type_infoE";
3483   // abi::__si_class_type_info.
3484   static const char * const SIClassTypeInfo =
3485     "_ZTVN10__cxxabiv120__si_class_type_infoE";
3486   // abi::__vmi_class_type_info.
3487   static const char * const VMIClassTypeInfo =
3488     "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3489 
3490   const char *VTableName = nullptr;
3491 
3492   switch (Ty->getTypeClass()) {
3493 #define TYPE(Class, Base)
3494 #define ABSTRACT_TYPE(Class, Base)
3495 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3496 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3497 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3498 #include "clang/AST/TypeNodes.inc"
3499     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3500 
3501   case Type::LValueReference:
3502   case Type::RValueReference:
3503     llvm_unreachable("References shouldn't get here");
3504 
3505   case Type::Auto:
3506   case Type::DeducedTemplateSpecialization:
3507     llvm_unreachable("Undeduced type shouldn't get here");
3508 
3509   case Type::Pipe:
3510     llvm_unreachable("Pipe types shouldn't get here");
3511 
3512   case Type::Builtin:
3513   case Type::BitInt:
3514   // GCC treats vector and complex types as fundamental types.
3515   case Type::Vector:
3516   case Type::ExtVector:
3517   case Type::ConstantMatrix:
3518   case Type::Complex:
3519   case Type::Atomic:
3520   // FIXME: GCC treats block pointers as fundamental types?!
3521   case Type::BlockPointer:
3522     // abi::__fundamental_type_info.
3523     VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3524     break;
3525 
3526   case Type::ConstantArray:
3527   case Type::IncompleteArray:
3528   case Type::VariableArray:
3529     // abi::__array_type_info.
3530     VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3531     break;
3532 
3533   case Type::FunctionNoProto:
3534   case Type::FunctionProto:
3535     // abi::__function_type_info.
3536     VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3537     break;
3538 
3539   case Type::Enum:
3540     // abi::__enum_type_info.
3541     VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3542     break;
3543 
3544   case Type::Record: {
3545     const CXXRecordDecl *RD =
3546       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3547 
3548     if (!RD->hasDefinition() || !RD->getNumBases()) {
3549       VTableName = ClassTypeInfo;
3550     } else if (CanUseSingleInheritance(RD)) {
3551       VTableName = SIClassTypeInfo;
3552     } else {
3553       VTableName = VMIClassTypeInfo;
3554     }
3555 
3556     break;
3557   }
3558 
3559   case Type::ObjCObject:
3560     // Ignore protocol qualifiers.
3561     Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3562 
3563     // Handle id and Class.
3564     if (isa<BuiltinType>(Ty)) {
3565       VTableName = ClassTypeInfo;
3566       break;
3567     }
3568 
3569     assert(isa<ObjCInterfaceType>(Ty));
3570     [[fallthrough]];
3571 
3572   case Type::ObjCInterface:
3573     if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3574       VTableName = SIClassTypeInfo;
3575     } else {
3576       VTableName = ClassTypeInfo;
3577     }
3578     break;
3579 
3580   case Type::ObjCObjectPointer:
3581   case Type::Pointer:
3582     // abi::__pointer_type_info.
3583     VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3584     break;
3585 
3586   case Type::MemberPointer:
3587     // abi::__pointer_to_member_type_info.
3588     VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3589     break;
3590   }
3591 
3592   llvm::Constant *VTable = nullptr;
3593 
3594   // Check if the alias exists. If it doesn't, then get or create the global.
3595   if (CGM.getItaniumVTableContext().isRelativeLayout())
3596     VTable = CGM.getModule().getNamedAlias(VTableName);
3597   if (!VTable)
3598     VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3599 
3600   CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3601 
3602   llvm::Type *PtrDiffTy =
3603       CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3604 
3605   // The vtable address point is 2.
3606   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3607     // The vtable address point is 8 bytes after its start:
3608     // 4 for the offset to top + 4 for the relative offset to rtti.
3609     llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3610     VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3611     VTable =
3612         llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3613   } else {
3614     llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3615     VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
3616                                                           Two);
3617   }
3618   VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3619 
3620   Fields.push_back(VTable);
3621 }
3622 
3623 /// Return the linkage that the type info and type info name constants
3624 /// should have for the given type.
3625 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3626                                                              QualType Ty) {
3627   // Itanium C++ ABI 2.9.5p7:
3628   //   In addition, it and all of the intermediate abi::__pointer_type_info
3629   //   structs in the chain down to the abi::__class_type_info for the
3630   //   incomplete class type must be prevented from resolving to the
3631   //   corresponding type_info structs for the complete class type, possibly
3632   //   by making them local static objects. Finally, a dummy class RTTI is
3633   //   generated for the incomplete type that will not resolve to the final
3634   //   complete class RTTI (because the latter need not exist), possibly by
3635   //   making it a local static object.
3636   if (ContainsIncompleteClassType(Ty))
3637     return llvm::GlobalValue::InternalLinkage;
3638 
3639   switch (Ty->getLinkage()) {
3640   case NoLinkage:
3641   case InternalLinkage:
3642   case UniqueExternalLinkage:
3643     return llvm::GlobalValue::InternalLinkage;
3644 
3645   case VisibleNoLinkage:
3646   case ModuleInternalLinkage:
3647   case ModuleLinkage:
3648   case ExternalLinkage:
3649     // RTTI is not enabled, which means that this type info struct is going
3650     // to be used for exception handling. Give it linkonce_odr linkage.
3651     if (!CGM.getLangOpts().RTTI)
3652       return llvm::GlobalValue::LinkOnceODRLinkage;
3653 
3654     if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3655       const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3656       if (RD->hasAttr<WeakAttr>())
3657         return llvm::GlobalValue::WeakODRLinkage;
3658       if (CGM.getTriple().isWindowsItaniumEnvironment())
3659         if (RD->hasAttr<DLLImportAttr>() &&
3660             ShouldUseExternalRTTIDescriptor(CGM, Ty))
3661           return llvm::GlobalValue::ExternalLinkage;
3662       // MinGW always uses LinkOnceODRLinkage for type info.
3663       if (RD->isDynamicClass() &&
3664           !CGM.getContext()
3665                .getTargetInfo()
3666                .getTriple()
3667                .isWindowsGNUEnvironment())
3668         return CGM.getVTableLinkage(RD);
3669     }
3670 
3671     return llvm::GlobalValue::LinkOnceODRLinkage;
3672   }
3673 
3674   llvm_unreachable("Invalid linkage!");
3675 }
3676 
3677 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3678   // We want to operate on the canonical type.
3679   Ty = Ty.getCanonicalType();
3680 
3681   // Check if we've already emitted an RTTI descriptor for this type.
3682   SmallString<256> Name;
3683   llvm::raw_svector_ostream Out(Name);
3684   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3685 
3686   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3687   if (OldGV && !OldGV->isDeclaration()) {
3688     assert(!OldGV->hasAvailableExternallyLinkage() &&
3689            "available_externally typeinfos not yet implemented");
3690 
3691     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3692   }
3693 
3694   // Check if there is already an external RTTI descriptor for this type.
3695   if (IsStandardLibraryRTTIDescriptor(Ty) ||
3696       ShouldUseExternalRTTIDescriptor(CGM, Ty))
3697     return GetAddrOfExternalRTTIDescriptor(Ty);
3698 
3699   // Emit the standard library with external linkage.
3700   llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3701 
3702   // Give the type_info object and name the formal visibility of the
3703   // type itself.
3704   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3705   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3706     // If the linkage is local, only default visibility makes sense.
3707     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3708   else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3709            ItaniumCXXABI::RUK_NonUniqueHidden)
3710     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3711   else
3712     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3713 
3714   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3715       llvm::GlobalValue::DefaultStorageClass;
3716   if (auto RD = Ty->getAsCXXRecordDecl()) {
3717     if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
3718          RD->hasAttr<DLLExportAttr>()) ||
3719         (CGM.shouldMapVisibilityToDLLExport(RD) &&
3720          !llvm::GlobalValue::isLocalLinkage(Linkage) &&
3721          llvmVisibility == llvm::GlobalValue::DefaultVisibility))
3722       DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3723   }
3724   return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3725 }
3726 
/// Build the type_info object for \p Ty with the given linkage, visibility
/// and DLL storage class, returning its address cast to i8*.  Fields is
/// populated in order: the abi vtable pointer, the type name pointer, and
/// then any members specific to the concrete __*_type_info subclass.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Add the trailing fields of the concrete type_info subclass, dispatching
  // on the canonical type class.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    // abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::BitInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    // Single inheritance gets an __si_class_type_info; anything else gets a
    // __vmi_class_type_info with per-base descriptions.
    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  // Materialize the accumulated fields as an anonymous constant struct.
  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  // Any pre-existing global with this name can only be a declaration here
  // (BuildTypeInfo(QualType) returns early for definitions); it is replaced
  // below.
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // Export the typeinfo in the same circumstances as the vtable is exported.
  auto GVDLLStorageClass = DLLStorageClass;
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (RD->hasAttr<DLLExportAttr>() ||
          CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
      }
    }
  }

  // If there's already an old global variable, replace it with the new one.
  // (takeName restores the mangled name, since the new global was renamed
  // while the old one still held it.)
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  // Weak symbols need a comdat so duplicate definitions can be merged.
  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  CharUnits Align = CGM.getContext().toCharUnitsFromBits(
      CGM.getTarget().getPointerAlign(LangAS::Default));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
                             ? GVDLLStorageClass
                             : DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
3917 
3918 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3919 /// for the given Objective-C object type.
3920 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3921   // Drop qualifiers.
3922   const Type *T = OT->getBaseType().getTypePtr();
3923   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3924 
3925   // The builtin types are abi::__class_type_infos and don't require
3926   // extra fields.
3927   if (isa<BuiltinType>(T)) return;
3928 
3929   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3930   ObjCInterfaceDecl *Super = Class->getSuperClass();
3931 
3932   // Root classes are also __class_type_info.
3933   if (!Super) return;
3934 
3935   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3936 
3937   // Everything else is single inheritance.
3938   llvm::Constant *BaseTypeInfo =
3939       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3940   Fields.push_back(BaseTypeInfo);
3941 }
3942 
3943 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3944 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3945 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3946   // Itanium C++ ABI 2.9.5p6b:
3947   // It adds to abi::__class_type_info a single member pointing to the
3948   // type_info structure for the base type,
3949   llvm::Constant *BaseTypeInfo =
3950     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3951   Fields.push_back(BaseTypeInfo);
3952 }
3953 
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    // Non-virtual bases encountered so far during the traversal.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    // Virtual bases encountered so far during the traversal.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
3962 
3963 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3964 /// abi::__vmi_class_type_info.
3965 ///
3966 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3967                                              SeenBases &Bases) {
3968 
3969   unsigned Flags = 0;
3970 
3971   auto *BaseDecl =
3972       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3973 
3974   if (Base->isVirtual()) {
3975     // Mark the virtual base as seen.
3976     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3977       // If this virtual base has been seen before, then the class is diamond
3978       // shaped.
3979       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3980     } else {
3981       if (Bases.NonVirtualBases.count(BaseDecl))
3982         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3983     }
3984   } else {
3985     // Mark the non-virtual base as seen.
3986     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3987       // If this non-virtual base has been seen before, then the class has non-
3988       // diamond shaped repeated inheritance.
3989       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3990     } else {
3991       if (Bases.VirtualBases.count(BaseDecl))
3992         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3993     }
3994   }
3995 
3996   // Walk all bases.
3997   for (const auto &I : BaseDecl->bases())
3998     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3999 
4000   return Flags;
4001 }
4002 
4003 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4004   unsigned Flags = 0;
4005   SeenBases Bases;
4006 
4007   // Walk all bases.
4008   for (const auto &I : RD->bases())
4009     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4010 
4011   return Flags;
4012 }
4013 
4014 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4015 /// classes with bases that do not satisfy the abi::__si_class_type_info
4016 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4017 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4018   llvm::Type *UnsignedIntLTy =
4019     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4020 
4021   // Itanium C++ ABI 2.9.5p6c:
4022   //   __flags is a word with flags describing details about the class
4023   //   structure, which may be referenced by using the __flags_masks
4024   //   enumeration. These flags refer to both direct and indirect bases.
4025   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4026   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4027 
4028   // Itanium C++ ABI 2.9.5p6c:
4029   //   __base_count is a word with the number of direct proper base class
4030   //   descriptions that follow.
4031   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4032 
4033   if (!RD->getNumBases())
4034     return;
4035 
4036   // Now add the base class descriptions.
4037 
4038   // Itanium C++ ABI 2.9.5p6c:
4039   //   __base_info[] is an array of base class descriptions -- one for every
4040   //   direct proper base. Each description is of the type:
4041   //
4042   //   struct abi::__base_class_type_info {
4043   //   public:
4044   //     const __class_type_info *__base_type;
4045   //     long __offset_flags;
4046   //
4047   //     enum __offset_flags_masks {
4048   //       __virtual_mask = 0x1,
4049   //       __public_mask = 0x2,
4050   //       __offset_shift = 8
4051   //     };
4052   //   };
4053 
4054   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4055   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4056   // LLP64 platforms.
4057   // FIXME: Consider updating libc++abi to match, and extend this logic to all
4058   // LLP64 platforms.
4059   QualType OffsetFlagsTy = CGM.getContext().LongTy;
4060   const TargetInfo &TI = CGM.getContext().getTargetInfo();
4061   if (TI.getTriple().isOSCygMing() &&
4062       TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
4063     OffsetFlagsTy = CGM.getContext().LongLongTy;
4064   llvm::Type *OffsetFlagsLTy =
4065       CGM.getTypes().ConvertType(OffsetFlagsTy);
4066 
4067   for (const auto &Base : RD->bases()) {
4068     // The __base_type member points to the RTTI for the base type.
4069     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4070 
4071     auto *BaseDecl =
4072         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4073 
4074     int64_t OffsetFlags = 0;
4075 
4076     // All but the lower 8 bits of __offset_flags are a signed offset.
4077     // For a non-virtual base, this is the offset in the object of the base
4078     // subobject. For a virtual base, this is the offset in the virtual table of
4079     // the virtual base offset for the virtual base referenced (negative).
4080     CharUnits Offset;
4081     if (Base.isVirtual())
4082       Offset =
4083         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4084     else {
4085       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4086       Offset = Layout.getBaseClassOffset(BaseDecl);
4087     };
4088 
4089     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4090 
4091     // The low-order byte of __offset_flags contains flags, as given by the
4092     // masks from the enumeration __offset_flags_masks.
4093     if (Base.isVirtual())
4094       OffsetFlags |= BCTI_Virtual;
4095     if (Base.getAccessSpecifier() == AS_public)
4096       OffsetFlags |= BCTI_Public;
4097 
4098     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4099   }
4100 }
4101 
4102 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4103 /// pieces from \p Type.
4104 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4105   unsigned Flags = 0;
4106 
4107   if (Type.isConstQualified())
4108     Flags |= ItaniumRTTIBuilder::PTI_Const;
4109   if (Type.isVolatileQualified())
4110     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4111   if (Type.isRestrictQualified())
4112     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4113   Type = Type.getUnqualifiedType();
4114 
4115   // Itanium C++ ABI 2.9.5p7:
4116   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
4117   //   incomplete class type, the incomplete target type flag is set.
4118   if (ContainsIncompleteClassType(Type))
4119     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4120 
4121   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4122     if (Proto->isNothrow()) {
4123       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4124       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4125     }
4126   }
4127 
4128   return Flags;
4129 }
4130 
4131 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4132 /// used for pointer types.
4133 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4134   // Itanium C++ ABI 2.9.5p7:
4135   //   __flags is a flag word describing the cv-qualification and other
4136   //   attributes of the type pointed to
4137   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4138 
4139   llvm::Type *UnsignedIntLTy =
4140     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4141   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4142 
4143   // Itanium C++ ABI 2.9.5p7:
4144   //  __pointee is a pointer to the std::type_info derivation for the
4145   //  unqualified type being pointed to.
4146   llvm::Constant *PointeeTypeInfo =
4147       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4148   Fields.push_back(PointeeTypeInfo);
4149 }
4150 
4151 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4152 /// struct, used for member pointer types.
4153 void
4154 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4155   QualType PointeeTy = Ty->getPointeeType();
4156 
4157   // Itanium C++ ABI 2.9.5p7:
4158   //   __flags is a flag word describing the cv-qualification and other
4159   //   attributes of the type pointed to.
4160   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4161 
4162   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4163   if (IsIncompleteClassType(ClassType))
4164     Flags |= PTI_ContainingClassIncomplete;
4165 
4166   llvm::Type *UnsignedIntLTy =
4167     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4168   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4169 
4170   // Itanium C++ ABI 2.9.5p7:
4171   //   __pointee is a pointer to the std::type_info derivation for the
4172   //   unqualified type being pointed to.
4173   llvm::Constant *PointeeTypeInfo =
4174       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4175   Fields.push_back(PointeeTypeInfo);
4176 
4177   // Itanium C++ ABI 2.9.5p9:
4178   //   __context is a pointer to an abi::__class_type_info corresponding to the
4179   //   class type containing the member pointed to
4180   //   (e.g., the "A" in "int A::*").
4181   Fields.push_back(
4182       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4183 }
4184 
4185 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4186   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4187 }
4188 
4189 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4190   // Types added here must also be added to TypeInfoIsInStandardLibrary.
4191   QualType FundamentalTypes[] = {
4192       getContext().VoidTy,             getContext().NullPtrTy,
4193       getContext().BoolTy,             getContext().WCharTy,
4194       getContext().CharTy,             getContext().UnsignedCharTy,
4195       getContext().SignedCharTy,       getContext().ShortTy,
4196       getContext().UnsignedShortTy,    getContext().IntTy,
4197       getContext().UnsignedIntTy,      getContext().LongTy,
4198       getContext().UnsignedLongTy,     getContext().LongLongTy,
4199       getContext().UnsignedLongLongTy, getContext().Int128Ty,
4200       getContext().UnsignedInt128Ty,   getContext().HalfTy,
4201       getContext().FloatTy,            getContext().DoubleTy,
4202       getContext().LongDoubleTy,       getContext().Float128Ty,
4203       getContext().Char8Ty,            getContext().Char16Ty,
4204       getContext().Char32Ty
4205   };
4206   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4207       RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4208           ? llvm::GlobalValue::DLLExportStorageClass
4209           : llvm::GlobalValue::DefaultStorageClass;
4210   llvm::GlobalValue::VisibilityTypes Visibility =
4211       CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4212   for (const QualType &FundamentalType : FundamentalTypes) {
4213     QualType PointerType = getContext().getPointerType(FundamentalType);
4214     QualType PointerTypeConst = getContext().getPointerType(
4215         FundamentalType.withConst());
4216     for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4217       ItaniumRTTIBuilder(*this).BuildTypeInfo(
4218           Type, llvm::GlobalValue::ExternalLinkage,
4219           Visibility, DLLStorageClass);
4220   }
4221 }
4222 
4223 /// What sort of uniqueness rules should we use for the RTTI for the
4224 /// given type?
4225 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4226     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4227   if (shouldRTTIBeUnique())
4228     return RUK_Unique;
4229 
4230   // It's only necessary for linkonce_odr or weak_odr linkage.
4231   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4232       Linkage != llvm::GlobalValue::WeakODRLinkage)
4233     return RUK_Unique;
4234 
4235   // It's only necessary with default visibility.
4236   if (CanTy->getVisibility() != DefaultVisibility)
4237     return RUK_Unique;
4238 
4239   // If we're not required to publish this symbol, hide it.
4240   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4241     return RUK_NonUniqueHidden;
4242 
4243   // If we're required to publish this symbol, as we might be under an
4244   // explicit instantiation, leave it with default visibility but
4245   // enable string-comparisons.
4246   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4247   return RUK_NonUniqueVisible;
4248 }
4249 
// Find out how to codegen the complete destructor and constructor
namespace {
// How the complete-object structor should be produced relative to the base
// variant: Emit = emit a separate function; RAUW = register a replacement of
// the complete symbol's uses with the base symbol; Alias = emit a true alias
// to the base symbol; COMDAT = alias emitted where the object format supports
// arbitrarily-named COMDAT groups (see getCodegenToUse below).
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
4254 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4255                                        const CXXMethodDecl *MD) {
4256   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4257     return StructorCodegen::Emit;
4258 
4259   // The complete and base structors are not equivalent if there are any virtual
4260   // bases, so emit separate functions.
4261   if (MD->getParent()->getNumVBases())
4262     return StructorCodegen::Emit;
4263 
4264   GlobalDecl AliasDecl;
4265   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4266     AliasDecl = GlobalDecl(DD, Dtor_Complete);
4267   } else {
4268     const auto *CD = cast<CXXConstructorDecl>(MD);
4269     AliasDecl = GlobalDecl(CD, Ctor_Complete);
4270   }
4271   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4272 
4273   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4274     return StructorCodegen::RAUW;
4275 
4276   // FIXME: Should we allow available_externally aliases?
4277   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4278     return StructorCodegen::RAUW;
4279 
4280   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4281     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4282     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4283         CGM.getTarget().getTriple().isOSBinFormatWasm())
4284       return StructorCodegen::COMDAT;
4285     return StructorCodegen::Emit;
4286   }
4287 
4288   return StructorCodegen::Alias;
4289 }
4290 
/// Emit \p AliasDecl (a complete-object constructor or destructor) as an IR
/// alias of the already-selected \p TargetDecl (the corresponding base
/// variant).  Any existing declaration under the alias's mangled name is
/// replaced and erased.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // If a definition already exists under this mangled name, keep it;
  // there is nothing to alias.
  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.  Note the ordering: steal the
  // name first, then redirect uses, then drop the now-unreferenced
  // declaration.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}
4323 
/// Emit one constructor/destructor variant, folding it into another symbol
/// (alias, COMDAT member, or RAUW replacement) when getCodegenToUse() says
/// that is possible, and emitting a real definition otherwise.
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  // Exactly one of CD/DD is non-null: a structor is either a ctor or a dtor.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // The complete-object variant may be foldable into the base variant
  // instead of being emitted as its own body.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      // No symbol is emitted for the complete variant at all; record that
      // references to its mangled name should be rewritten to the base
      // variant's address.
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Group all variants under a single COMDAT named with the C5/D5
    // ("unified" ctor/dtor) mangling so the linker keeps or discards them
    // together.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}
4388 
4389 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4390   // void *__cxa_begin_catch(void*);
4391   llvm::FunctionType *FTy = llvm::FunctionType::get(
4392       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4393 
4394   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4395 }
4396 
4397 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4398   // void __cxa_end_catch();
4399   llvm::FunctionType *FTy =
4400       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4401 
4402   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4403 }
4404 
4405 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4406   // void *__cxa_get_exception_ptr(void*);
4407   llvm::FunctionType *FTy = llvm::FunctionType::get(
4408       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4409 
4410   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4411 }
4412 
4413 namespace {
4414   /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4415   /// exception type lets us state definitively that the thrown exception
4416   /// type does not have a destructor.  In particular:
4417   ///   - Catch-alls tell us nothing, so we have to conservatively
4418   ///     assume that the thrown exception might have a destructor.
4419   ///   - Catches by reference behave according to their base types.
4420   ///   - Catches of non-record types will only trigger for exceptions
4421   ///     of non-record types, which never have destructors.
4422   ///   - Catches of record types can trigger for arbitrary subclasses
4423   ///     of the caught type, so we have to assume the actual thrown
4424   ///     exception type might have a throwing destructor, even if the
4425   ///     caught type's destructor is trivial or nothrow.
4426   struct CallEndCatch final : EHScopeStack::Cleanup {
4427     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4428     bool MightThrow;
4429 
4430     void Emit(CodeGenFunction &CGF, Flags flags) override {
4431       if (!MightThrow) {
4432         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4433         return;
4434       }
4435 
4436       CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4437     }
4438   };
4439 }
4440 
4441 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4442 /// __cxa_end_catch.
4443 ///
4444 /// \param EndMightThrow - true if __cxa_end_catch might throw
4445 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4446                                    llvm::Value *Exn,
4447                                    bool EndMightThrow) {
4448   llvm::CallInst *call =
4449     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4450 
4451   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4452 
4453   return call;
4454 }
4455 
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// \param CatchParam the handler's declared exception variable
/// \param ParamAddr  the storage allocated for that variable
/// \param Loc        source location used for loads emitted here
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    // See CallEndCatch: only record types can have throwing destructors.
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    // Store the (possibly adjusted) pointer as the reference's value.
    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    // Non-record catch: end-catch cannot throw (see CallEndCatch).
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        // ARC: retain before storing, then fall into the plain-store path.
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        [[fallthrough]];

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    // Copy the scalar/complex value out of the exception object into the
    // catch variable.
    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  // Aggregate (class-type) catch from here on.
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        LLVMCatchTy, caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
4636 
4637 /// Begins a catch statement by initializing the catch variable and
4638 /// calling __cxa_begin_catch.
4639 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4640                                    const CXXCatchStmt *S) {
4641   // We have to be very careful with the ordering of cleanups here:
4642   //   C++ [except.throw]p4:
4643   //     The destruction [of the exception temporary] occurs
4644   //     immediately after the destruction of the object declared in
4645   //     the exception-declaration in the handler.
4646   //
4647   // So the precise ordering is:
4648   //   1.  Construct catch variable.
4649   //   2.  __cxa_begin_catch
4650   //   3.  Enter __cxa_end_catch cleanup
4651   //   4.  Enter dtor cleanup
4652   //
4653   // We do this by using a slightly abnormal initialization process.
4654   // Delegation sequence:
4655   //   - ExitCXXTryStmt opens a RunCleanupsScope
4656   //     - EmitAutoVarAlloca creates the variable and debug info
4657   //       - InitCatchParam initializes the variable from the exception
4658   //       - CallBeginCatch calls __cxa_begin_catch
4659   //       - CallBeginCatch enters the __cxa_end_catch cleanup
4660   //     - EmitAutoVarCleanups enters the variable destructor cleanup
4661   //   - EmitCXXTryStmt emits the code for the catch body
4662   //   - EmitCXXTryStmt close the RunCleanupsScope
4663 
4664   VarDecl *CatchParam = S->getExceptionDecl();
4665   if (!CatchParam) {
4666     llvm::Value *Exn = CGF.getExceptionFromSlot();
4667     CallBeginCatch(CGF, Exn, true);
4668     return;
4669   }
4670 
4671   // Emit the local.
4672   CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4673   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4674   CGF.EmitAutoVarCleanups(var);
4675 }
4676 
/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
  ASTContext &C = CGM.getContext();
  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      C.VoidTy, {C.getPointerType(C.CharTy)});
  llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
  llvm::Function *fn =
      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
  // Only materialize the body once per module; an empty function is a
  // freshly created declaration.
  if (fn->empty()) {
    CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely.  The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).  This marks the exception as handled
    // before we terminate.
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }
  return fnRef;
}
4730 
4731 llvm::CallInst *
4732 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4733                                                    llvm::Value *Exn) {
4734   // In C++, we want to call __cxa_begin_catch() before terminating.
4735   if (Exn) {
4736     assert(CGF.CGM.getLangOpts().CPlusPlus);
4737     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4738   }
4739   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4740 }
4741 
4742 std::pair<llvm::Value *, const CXXRecordDecl *>
4743 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4744                              const CXXRecordDecl *RD) {
4745   return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4746 }
4747 
4748 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4749                                        const CXXCatchStmt *C) {
4750   if (CGF.getTarget().hasFeature("exception-handling"))
4751     CGF.EHStack.pushCleanup<CatchRetScope>(
4752         NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4753   ItaniumCXXABI::emitBeginCatch(CGF, C);
4754 }
4755 
4756 llvm::CallInst *
4757 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4758                                                        llvm::Value *Exn) {
4759   // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4760   // the violating exception to mark it handled, but it is currently hard to do
4761   // with wasm EH instruction structure with catch/catch_all, we just call
4762   // std::terminate and ignore the violating exception as in CGCXXABI.
4763   // TODO Consider code transformation that makes calling __clang_call_terminate
4764   // possible.
4765   return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4766 }
4767 
/// Register a global destructor as best as we know how.
///
/// Thread-local variables are registered through the platform's
/// __pt_atexit_np routine (and cannot later be unregistered); ordinary
/// globals get an atexit stub plus a sterm finalizer that can unregister
/// and/or run it.
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                  llvm::FunctionCallee Dtor,
                                  llvm::Constant *Addr) {
  if (D.getTLSKind() != VarDecl::TLS_None) {
    // atexit routine expects "int(*)(int,...)"
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
    llvm::PointerType *FpTy = FTy->getPointerTo();

    // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);

    // Fetch the actual function.
    llvm::FunctionCallee AtExit =
        CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");

    // Create __dtor function for the var decl.
    llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);

    // Register above __dtor with atexit().
    // First param is flags and must be 0, second param is function ptr
    llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
    CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});

    // Cannot unregister TLS __dtor so done
    return;
  }

  // Create __dtor function for the var decl.
  llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);

  // Register above __dtor with atexit().
  CGF.registerGlobalDtorWithAtExit(DtorStub);

  // Emit __finalize function to unregister __dtor and (as appropriate) call
  // __dtor.
  emitCXXStermFinalizer(D, DtorStub, Addr);
}
4808 
/// Emit a "sterm finalizer" for variable \p D: a void() function that
/// calls unatexit() on \p dtorStub and, if the stub was still registered
/// (unatexit returned 0), invokes it.  The finalizer is then scheduled
/// according to the variable's init priority or linkage.
/// NOTE(review): \p addr is currently unused in this function.
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  // Mangle the finalizer's name from the variable.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList(), D.getLocation(),
                    D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // the unatexit returns a value of 0, meaning that the cleanup is still
  // pending (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock, otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  // Schedule the finalizer: prioritized entry, unordered global_dtors
  // entry, or the default ordered list.
  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             IPA->getPriority());
  } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
  }
}
4870