1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 //  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 //  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
38 
39 using namespace clang;
40 using namespace CodeGen;
41 
42 namespace {
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44   /// VTables - All the vtables which have been defined.
45   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46 
47   /// All the thread wrapper functions that have been used.
48   llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49       ThreadWrappers;
50 
51 protected:
52   bool UseARMMethodPtrABI;
53   bool UseARMGuardVarABI;
54   bool Use32BitVTableOffsetABI;
55 
56   ItaniumMangleContext &getMangleContext() {
57     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58   }
59 
60 public:
61   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62                 bool UseARMMethodPtrABI = false,
63                 bool UseARMGuardVarABI = false) :
64     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65     UseARMGuardVarABI(UseARMGuardVarABI),
66     Use32BitVTableOffsetABI(false) { }
67 
68   bool classifyReturnType(CGFunctionInfo &FI) const override;
69 
70   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71     // If C++ prohibits us from making a copy, pass by address.
72     if (!RD->canPassInRegisters())
73       return RAA_Indirect;
74     return RAA_Default;
75   }
76 
77   bool isThisCompleteObject(GlobalDecl GD) const override {
78     // The Itanium ABI has separate complete-object vs.  base-object
79     // variants of both constructors and destructors.
80     if (isa<CXXDestructorDecl>(GD.getDecl())) {
81       switch (GD.getDtorType()) {
82       case Dtor_Complete:
83       case Dtor_Deleting:
84         return true;
85 
86       case Dtor_Base:
87         return false;
88 
89       case Dtor_Comdat:
90         llvm_unreachable("emitting dtor comdat as function?");
91       }
92       llvm_unreachable("bad dtor kind");
93     }
94     if (isa<CXXConstructorDecl>(GD.getDecl())) {
95       switch (GD.getCtorType()) {
96       case Ctor_Complete:
97         return true;
98 
99       case Ctor_Base:
100         return false;
101 
102       case Ctor_CopyingClosure:
103       case Ctor_DefaultClosure:
104         llvm_unreachable("closure ctors in Itanium ABI?");
105 
106       case Ctor_Comdat:
107         llvm_unreachable("emitting ctor comdat as function?");
108       }
109       llvm_unreachable("bad ctor kind");
110     }
111 
112     // No other kinds.
113     return false;
114   }
115 
116   bool isZeroInitializable(const MemberPointerType *MPT) override;
117 
118   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119 
120   CGCallee
121     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122                                     const Expr *E,
123                                     Address This,
124                                     llvm::Value *&ThisPtrForCall,
125                                     llvm::Value *MemFnPtr,
126                                     const MemberPointerType *MPT) override;
127 
128   llvm::Value *
129     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130                                  Address Base,
131                                  llvm::Value *MemPtr,
132                                  const MemberPointerType *MPT) override;
133 
134   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135                                            const CastExpr *E,
136                                            llvm::Value *Src) override;
137   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138                                               llvm::Constant *Src) override;
139 
140   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141 
142   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144                                         CharUnits offset) override;
145   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147                                      CharUnits ThisAdjustment);
148 
149   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150                                            llvm::Value *L, llvm::Value *R,
151                                            const MemberPointerType *MPT,
152                                            bool Inequality) override;
153 
154   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155                                          llvm::Value *Addr,
156                                          const MemberPointerType *MPT) override;
157 
158   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159                                Address Ptr, QualType ElementType,
160                                const CXXDestructorDecl *Dtor) override;
161 
162   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164 
165   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166 
167   llvm::CallInst *
168   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169                                       llvm::Value *Exn) override;
170 
171   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173   CatchTypeInfo
174   getAddrOfCXXCatchHandlerType(QualType Ty,
175                                QualType CatchHandlerType) override {
176     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177   }
178 
179   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182                           Address ThisPtr,
183                           llvm::Type *StdTypeInfoPtrTy) override;
184 
185   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186                                           QualType SrcRecordTy) override;
187 
188   llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189                                    QualType SrcRecordTy, QualType DestTy,
190                                    QualType DestRecordTy,
191                                    llvm::BasicBlock *CastEnd) override;
192 
193   llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194                                      QualType SrcRecordTy,
195                                      QualType DestTy) override;
196 
197   bool EmitBadCastCall(CodeGenFunction &CGF) override;
198 
199   llvm::Value *
200     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201                               const CXXRecordDecl *ClassDecl,
202                               const CXXRecordDecl *BaseClassDecl) override;
203 
204   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205 
206   AddedStructorArgCounts
207   buildStructorSignature(GlobalDecl GD,
208                          SmallVectorImpl<CanQualType> &ArgTys) override;
209 
210   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211                               CXXDtorType DT) const override {
212     // Itanium does not emit any destructor variant as an inline thunk.
213     // Delegating may occur as an optimization, but all variants are either
214     // emitted with external linkage or as linkonce if they are inline and used.
215     return false;
216   }
217 
218   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219 
220   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221                                  FunctionArgList &Params) override;
222 
223   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224 
225   AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226                                                const CXXConstructorDecl *D,
227                                                CXXCtorType Type,
228                                                bool ForVirtualBase,
229                                                bool Delegating) override;
230 
231   llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232                                              const CXXDestructorDecl *DD,
233                                              CXXDtorType Type,
234                                              bool ForVirtualBase,
235                                              bool Delegating) override;
236 
237   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238                           CXXDtorType Type, bool ForVirtualBase,
239                           bool Delegating, Address This,
240                           QualType ThisTy) override;
241 
242   void emitVTableDefinitions(CodeGenVTables &CGVT,
243                              const CXXRecordDecl *RD) override;
244 
245   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246                                            CodeGenFunction::VPtr Vptr) override;
247 
248   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249     return true;
250   }
251 
252   llvm::Constant *
253   getVTableAddressPoint(BaseSubobject Base,
254                         const CXXRecordDecl *VTableClass) override;
255 
256   llvm::Value *getVTableAddressPointInStructor(
257       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259 
260   llvm::Value *getVTableAddressPointInStructorWithVTT(
261       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263 
264   llvm::Constant *
265   getVTableAddressPointForConstExpr(BaseSubobject Base,
266                                     const CXXRecordDecl *VTableClass) override;
267 
268   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269                                         CharUnits VPtrOffset) override;
270 
271   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272                                      Address This, llvm::Type *Ty,
273                                      SourceLocation Loc) override;
274 
275   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276                                          const CXXDestructorDecl *Dtor,
277                                          CXXDtorType DtorType, Address This,
278                                          DeleteOrMemberCallExpr E) override;
279 
280   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281 
282   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284 
285   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286                        bool ReturnAdjustment) override {
287     // Allow inlining of thunks by emitting them with available_externally
288     // linkage together with vtables when needed.
289     if (ForVTable && !Thunk->hasLocalLinkage())
290       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291     CGM.setGVProperties(Thunk, GD);
292   }
293 
294   bool exportThunk() override { return true; }
295 
296   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297                                      const ThisAdjustment &TA) override;
298 
299   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300                                        const ReturnAdjustment &RA) override;
301 
302   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303                               FunctionArgList &Args) const override {
304     assert(!Args.empty() && "expected the arglist to not be empty!");
305     return Args.size() - 1;
306   }
307 
308   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309   StringRef GetDeletedVirtualCallName() override
310     { return "__cxa_deleted_virtual"; }
311 
312   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313   Address InitializeArrayCookie(CodeGenFunction &CGF,
314                                 Address NewPtr,
315                                 llvm::Value *NumElements,
316                                 const CXXNewExpr *expr,
317                                 QualType ElementType) override;
318   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319                                    Address allocPtr,
320                                    CharUnits cookieSize) override;
321 
322   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323                        llvm::GlobalVariable *DeclPtr,
324                        bool PerformInit) override;
325   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326                           llvm::FunctionCallee dtor,
327                           llvm::Constant *addr) override;
328 
329   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330                                                 llvm::Value *Val);
331   void EmitThreadLocalInitFuncs(
332       CodeGenModule &CGM,
333       ArrayRef<const VarDecl *> CXXThreadLocals,
334       ArrayRef<llvm::Function *> CXXThreadLocalInits,
335       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336 
337   /// Determine whether we will definitely emit this variable with a constant
338   /// initializer, either because the language semantics demand it or because
339   /// we know that the initializer is a constant.
340   bool isEmittedWithConstantInitializer(const VarDecl *VD) const {
341     VD = VD->getMostRecentDecl();
342     if (VD->hasAttr<ConstInitAttr>())
343       return true;
344 
345     // All later checks examine the initializer specified on the variable. If
346     // the variable is weak, such examination would not be correct.
347     if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>())
348       return false;
349 
350     const VarDecl *InitDecl = VD->getInitializingDeclaration();
351     if (!InitDecl)
352       return false;
353 
354     // If there's no initializer to run, this is constant initialization.
355     if (!InitDecl->hasInit())
356       return true;
357 
358     // If we have the only definition, we don't need a thread wrapper if we
359     // will emit the value as a constant.
360     if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
361       return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();
362 
363     // Otherwise, we need a thread wrapper unless we know that every
364     // translation unit will emit the value as a constant. We rely on the
365     // variable being constant-initialized in every translation unit if it's
366     // constant-initialized in any translation unit, which isn't actually
367     // guaranteed by the standard but is necessary for sanity.
368     return InitDecl->hasConstantInitialization();
369   }
370 
371   bool usesThreadWrapperFunction(const VarDecl *VD) const override {
372     return !isEmittedWithConstantInitializer(VD) ||
373            VD->needsDestruction(getContext());
374   }
375   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
376                                       QualType LValType) override;
377 
378   bool NeedsVTTParameter(GlobalDecl GD) override;
379 
380   /**************************** RTTI Uniqueness ******************************/
381 
382 protected:
383   /// Returns true if the ABI requires RTTI type_info objects to be unique
384   /// across a program.
385   virtual bool shouldRTTIBeUnique() const { return true; }
386 
387 public:
388   /// What sort of unique-RTTI behavior should we use?
389   enum RTTIUniquenessKind {
390     /// We are guaranteeing, or need to guarantee, that the RTTI string
391     /// is unique.
392     RUK_Unique,
393 
394     /// We are not guaranteeing uniqueness for the RTTI string, so we
395     /// can demote to hidden visibility but must use string comparisons.
396     RUK_NonUniqueHidden,
397 
398     /// We are not guaranteeing uniqueness for the RTTI string, so we
399     /// have to use string comparisons, but we also have to emit it with
400     /// non-hidden visibility.
401     RUK_NonUniqueVisible
402   };
403 
404   /// Return the required visibility status for the given type and linkage in
405   /// the current ABI.
406   RTTIUniquenessKind
407   classifyRTTIUniqueness(QualType CanTy,
408                          llvm::GlobalValue::LinkageTypes Linkage) const;
409   friend class ItaniumRTTIBuilder;
410 
411   void emitCXXStructor(GlobalDecl GD) override;
412 
413   std::pair<llvm::Value *, const CXXRecordDecl *>
414   LoadVTablePtr(CodeGenFunction &CGF, Address This,
415                 const CXXRecordDecl *RD) override;
416 
417  private:
418    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
419      const auto &VtableLayout =
420          CGM.getItaniumVTableContext().getVTableLayout(RD);
421 
422      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
423        // Skip empty slot.
424        if (!VtableComponent.isUsedFunctionPointerKind())
425          continue;
426 
427        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
428        if (!Method->getCanonicalDecl()->isInlined())
429          continue;
430 
431        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
432        auto *Entry = CGM.GetGlobalValue(Name);
433        // This checks whether the virtual inline function has already been
434        // emitted. Note that it is possible for this inline function to be
435        // emitted after we tried to emit the vtable speculatively. Because of
436        // this, we do an extra pass after emitting all deferred vtables to
437        // find and emit these vtables opportunistically.
438        if (!Entry || Entry->isDeclaration())
439          return true;
440      }
441      return false;
442   }
443 
444   bool isVTableHidden(const CXXRecordDecl *RD) const {
445     const auto &VtableLayout =
446             CGM.getItaniumVTableContext().getVTableLayout(RD);
447 
448     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
449       if (VtableComponent.isRTTIKind()) {
450         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
451         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
452           return true;
453       } else if (VtableComponent.isUsedFunctionPointerKind()) {
454         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
455         if (Method->getVisibility() == Visibility::HiddenVisibility &&
456             !Method->isDefined())
457           return true;
458       }
459     }
460     return false;
461   }
462 };
463 
464 class ARMCXXABI : public ItaniumCXXABI {
465 public:
466   ARMCXXABI(CodeGen::CodeGenModule &CGM) :
467     ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
468                   /*UseARMGuardVarABI=*/true) {}
469 
470   bool HasThisReturn(GlobalDecl GD) const override {
471     return (isa<CXXConstructorDecl>(GD.getDecl()) || (
472               isa<CXXDestructorDecl>(GD.getDecl()) &&
473               GD.getDtorType() != Dtor_Deleting));
474   }
475 
476   void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
477                            QualType ResTy) override;
478 
479   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
480   Address InitializeArrayCookie(CodeGenFunction &CGF,
481                                 Address NewPtr,
482                                 llvm::Value *NumElements,
483                                 const CXXNewExpr *expr,
484                                 QualType ElementType) override;
485   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
486                                    CharUnits cookieSize) override;
487 };
488 
489 class AppleARM64CXXABI : public ARMCXXABI {
490 public:
491   AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
492     Use32BitVTableOffsetABI = true;
493   }
494 
495   // ARM64 libraries are prepared for non-unique RTTI.
496   bool shouldRTTIBeUnique() const override { return false; }
497 };
498 
499 class FuchsiaCXXABI final : public ItaniumCXXABI {
500 public:
501   explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
502       : ItaniumCXXABI(CGM) {}
503 
504 private:
505   bool HasThisReturn(GlobalDecl GD) const override {
506     return isa<CXXConstructorDecl>(GD.getDecl()) ||
507            (isa<CXXDestructorDecl>(GD.getDecl()) &&
508             GD.getDtorType() != Dtor_Deleting);
509   }
510 };
511 
512 class WebAssemblyCXXABI final : public ItaniumCXXABI {
513 public:
514   explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
515       : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
516                       /*UseARMGuardVarABI=*/true) {}
517   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
518   llvm::CallInst *
519   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
520                                       llvm::Value *Exn) override;
521 
522 private:
523   bool HasThisReturn(GlobalDecl GD) const override {
524     return isa<CXXConstructorDecl>(GD.getDecl()) ||
525            (isa<CXXDestructorDecl>(GD.getDecl()) &&
526             GD.getDtorType() != Dtor_Deleting);
527   }
528   bool canCallMismatchedFunctionType() const override { return false; }
529 };
530 
531 class XLCXXABI final : public ItaniumCXXABI {
532 public:
533   explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
534       : ItaniumCXXABI(CGM) {}
535 
536   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
537                           llvm::FunctionCallee dtor,
538                           llvm::Constant *addr) override;
539 
540   bool useSinitAndSterm() const override { return true; }
541 
542 private:
543   void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
544                              llvm::Constant *addr);
545 };
546 }
547 
548 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
549   switch (CGM.getContext().getCXXABIKind()) {
550   // For IR-generation purposes, there's no significant difference
551   // between the ARM and iOS ABIs.
552   case TargetCXXABI::GenericARM:
553   case TargetCXXABI::iOS:
554   case TargetCXXABI::WatchOS:
555     return new ARMCXXABI(CGM);
556 
557   case TargetCXXABI::AppleARM64:
558     return new AppleARM64CXXABI(CGM);
559 
560   case TargetCXXABI::Fuchsia:
561     return new FuchsiaCXXABI(CGM);
562 
563   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
564   // include the other 32-bit ARM oddities: constructor/destructor return values
565   // and array cookies.
566   case TargetCXXABI::GenericAArch64:
567     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
568                              /*UseARMGuardVarABI=*/true);
569 
570   case TargetCXXABI::GenericMIPS:
571     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
572 
573   case TargetCXXABI::WebAssembly:
574     return new WebAssemblyCXXABI(CGM);
575 
576   case TargetCXXABI::XL:
577     return new XLCXXABI(CGM);
578 
579   case TargetCXXABI::GenericItanium:
580     if (CGM.getContext().getTargetInfo().getTriple().getArch()
581         == llvm::Triple::le32) {
582       // For PNaCl, use ARM-style method pointers so that PNaCl code
583       // does not assume anything about the alignment of function
584       // pointers.
585       return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
586     }
587     return new ItaniumCXXABI(CGM);
588 
589   case TargetCXXABI::Microsoft:
590     llvm_unreachable("Microsoft ABI is not Itanium-based");
591   }
592   llvm_unreachable("bad ABI kind");
593 }
594 
595 llvm::Type *
596 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
597   if (MPT->isMemberDataPointer())
598     return CGM.PtrDiffTy;
599   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
600 }
601 
602 /// In the Itanium and ARM ABIs, method pointers have the form:
603 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
604 ///
605 /// In the Itanium ABI:
606 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
607 ///  - the this-adjustment is (memptr.adj)
608 ///  - the virtual offset is (memptr.ptr - 1)
609 ///
610 /// In the ARM ABI:
611 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
612 ///  - the this-adjustment is (memptr.adj >> 1)
613 ///  - the virtual offset is (memptr.ptr)
614 /// ARM uses 'adj' for the virtual flag because Thumb functions
615 /// may be only single-byte aligned.
616 ///
617 /// If the member is virtual, the adjusted 'this' pointer points
618 /// to a vtable pointer from which the virtual offset is applied.
619 ///
620 /// If the member is non-virtual, memptr.ptr is the address of
621 /// the function to call.
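///
/// As an illustrative example (not text from the ABI documents): on a
/// typical 64-bit Itanium target, a pointer to a non-virtual member function
/// at address F with no this-adjustment is encoded as
/// { ptr = (ptrdiff_t)F, adj = 0 }, while a pointer to the virtual function
/// occupying vtable slot 2 is { ptr = 2*8 + 1 = 17, adj = 0 }.  Under the
/// ARM variant the same virtual case is { ptr = 2*8 = 16, adj = 1 }.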
622 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
623     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
624     llvm::Value *&ThisPtrForCall,
625     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
626   CGBuilderTy &Builder = CGF.Builder;
627 
628   const FunctionProtoType *FPT =
629     MPT->getPointeeType()->getAs<FunctionProtoType>();
630   auto *RD =
631       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
632 
633   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
634       CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
635 
636   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
637 
638   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
639   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
640   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
641 
642   // Extract memptr.adj, which is in the second field.
643   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
644 
645   // Compute the true adjustment.
646   llvm::Value *Adj = RawAdj;
647   if (UseARMMethodPtrABI)
648     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
649 
650   // Apply the adjustment and cast back to the original struct type
651   // for consistency.
652   llvm::Value *This = ThisAddr.getPointer();
653   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
654   Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
655   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
656   ThisPtrForCall = This;
657 
658   // Load the function pointer.
659   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
660 
661   // If the LSB in the function pointer is 1, the function pointer points to
662   // a virtual function.
663   llvm::Value *IsVirtual;
664   if (UseARMMethodPtrABI)
665     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
666   else
667     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
668   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
669   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
670 
671   // In the virtual path, the adjustment left 'This' pointing to the
672   // vtable of the correct base subobject.  The "function pointer" is an
673   // offset within the vtable (+1 for the virtual flag on non-ARM).
674   CGF.EmitBlock(FnVirtual);
675 
676   // Cast the adjusted this to a pointer to vtable pointer and load.
677   llvm::Type *VTableTy = Builder.getInt8PtrTy();
678   CharUnits VTablePtrAlign =
679     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
680                                       CGF.getPointerAlign());
681   llvm::Value *VTable =
682     CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
683 
684   // Apply the offset.
685   // On ARM64, to reserve extra space in virtual member function pointers,
686   // we only pay attention to the low 32 bits of the offset.
687   llvm::Value *VTableOffset = FnAsInt;
688   if (!UseARMMethodPtrABI)
689     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
690   if (Use32BitVTableOffsetABI) {
691     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
692     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
693   }
694 
695   // Check the address of the function pointer if CFI on member function
696   // pointers is enabled.
697   llvm::Constant *CheckSourceLocation;
698   llvm::Constant *CheckTypeDesc;
699   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
700                             CGM.HasHiddenLTOVisibility(RD);
701   bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
702                            CGM.HasHiddenLTOVisibility(RD);
703   bool ShouldEmitWPDInfo =
704       CGM.getCodeGenOpts().WholeProgramVTables &&
705       // Don't insert type tests if we are forcing public std visibility.
706       !CGM.HasLTOVisibilityPublicStd(RD);
707   llvm::Value *VirtualFn = nullptr;
708 
709   {
710     CodeGenFunction::SanitizerScope SanScope(&CGF);
711     llvm::Value *TypeId = nullptr;
712     llvm::Value *CheckResult = nullptr;
713 
714     if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
715       // If doing CFI, VFE or WPD, we will need the metadata node to check
716       // against.
717       llvm::Metadata *MD =
718           CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
719       TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
720     }
721 
722     if (ShouldEmitVFEInfo) {
723       llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
724 
725       // If doing VFE, load from the vtable with a type.checked.load intrinsic
726       // call. Note that we use the GEP to calculate the address to load from
727       // and pass 0 as the offset to the intrinsic. This is because every
728       // vtable slot of the correct type is marked with matching metadata, and
729       // we know that the load must be from one of these slots.
730       llvm::Value *CheckedLoad = Builder.CreateCall(
731           CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
732           {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
733       CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
734       VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
735       VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
736                                         "memptr.virtualfn");
737     } else {
738       // When not doing VFE, emit a normal load, as it allows more
739       // optimisations than type.checked.load.
740       if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
741         llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
742         CheckResult = Builder.CreateCall(
743             CGM.getIntrinsic(llvm::Intrinsic::type_test),
744             {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
745       }
746 
747       if (CGM.getItaniumVTableContext().isRelativeLayout()) {
748         VirtualFn = CGF.Builder.CreateCall(
749             CGM.getIntrinsic(llvm::Intrinsic::load_relative,
750                              {VTableOffset->getType()}),
751             {VTable, VTableOffset});
752         VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
753       } else {
754         llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset);
755         VFPAddr = CGF.Builder.CreateBitCast(
756             VFPAddr, FTy->getPointerTo()->getPointerTo());
757         VirtualFn = CGF.Builder.CreateAlignedLoad(
758             FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
759             "memptr.virtualfn");
760       }
761     }
762     assert(VirtualFn && "Virtual function pointer not created!");
763     assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
764             CheckResult) &&
765            "Check result required but not created!");
766 
767     if (ShouldEmitCFICheck) {
768       // If doing CFI, emit the check.
769       CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
770       CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
771       llvm::Constant *StaticData[] = {
772           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
773           CheckSourceLocation,
774           CheckTypeDesc,
775       };
776 
777       if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
778         CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
779       } else {
780         llvm::Value *AllVtables = llvm::MetadataAsValue::get(
781             CGM.getLLVMContext(),
782             llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
783         llvm::Value *ValidVtable = Builder.CreateCall(
784             CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
785         CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
786                       SanitizerHandler::CFICheckFail, StaticData,
787                       {VTable, ValidVtable});
788       }
789 
790       FnVirtual = Builder.GetInsertBlock();
791     }
792   } // End of sanitizer scope
793 
794   CGF.EmitBranch(FnEnd);
795 
796   // In the non-virtual path, the 'function pointer' field really is just a
797   // plain function pointer.
798   CGF.EmitBlock(FnNonVirtual);
799   llvm::Value *NonVirtualFn =
800     Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
801 
802   // Check the function pointer if CFI on member function pointers is enabled.
803   if (ShouldEmitCFICheck) {
804     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
805     if (RD->hasDefinition()) {
806       CodeGenFunction::SanitizerScope SanScope(&CGF);
807 
808       llvm::Constant *StaticData[] = {
809           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
810           CheckSourceLocation,
811           CheckTypeDesc,
812       };
813 
814       llvm::Value *Bit = Builder.getFalse();
815       llvm::Value *CastedNonVirtualFn =
816           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
817       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
818         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
819             getContext().getMemberPointerType(
820                 MPT->getPointeeType(),
821                 getContext().getRecordType(Base).getTypePtr()));
822         llvm::Value *TypeId =
823             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
824 
825         llvm::Value *TypeTest =
826             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
827                                {CastedNonVirtualFn, TypeId});
828         Bit = Builder.CreateOr(Bit, TypeTest);
829       }
830 
831       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
832                     SanitizerHandler::CFICheckFail, StaticData,
833                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
834 
835       FnNonVirtual = Builder.GetInsertBlock();
836     }
837   }
838 
839   // We're done.
840   CGF.EmitBlock(FnEnd);
841   llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
842   CalleePtr->addIncoming(VirtualFn, FnVirtual);
843   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
844 
845   CGCallee Callee(FPT, CalleePtr);
846   return Callee;
847 }
848 
849 /// Compute an l-value by applying the given pointer-to-member to a
850 /// base object.
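///
/// For example (illustrative only): applying a data member pointer whose
/// value is 16 to a base object at address B yields the address B + 16,
/// cast to a pointer to the member's type in B's address space.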
851 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
852     CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
853     const MemberPointerType *MPT) {
854   assert(MemPtr->getType() == CGM.PtrDiffTy);
855 
856   CGBuilderTy &Builder = CGF.Builder;
857 
858   // Cast to char*.
859   Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
860 
861   // Apply the offset, which we assume is non-null.
862   llvm::Value *Addr = Builder.CreateInBoundsGEP(
863       Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
864 
865   // Cast the address to the appropriate pointer type, adopting the
866   // address space of the base pointer.
867   llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
868                             ->getPointerTo(Base.getAddressSpace());
869   return Builder.CreateBitCast(Addr, PType);
870 }
871 
872 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
873 /// conversion.
874 ///
875 /// Bitcast conversions are always a no-op under Itanium.
876 ///
877 /// Obligatory offset/adjustment diagram:
878 ///         <-- offset -->          <-- adjustment -->
879 ///   |--------------------------|----------------------|--------------------|
880 ///   ^Derived address point     ^Base address point    ^Member address point
881 ///
882 /// So when converting a base member pointer to a derived member pointer,
883 /// we add the offset to the adjustment because the address point has
884 /// decreased;  and conversely, when converting a derived MP to a base MP
885 /// we subtract the offset from the adjustment because the address point
886 /// has increased.
887 ///
888 /// The standard forbids (at compile time) conversion to and from
889 /// virtual bases, which is why we don't have to consider them here.
890 ///
891 /// The standard forbids (at run time) casting a derived MP to a base
892 /// MP when the derived MP does not point to a member of the base.
893 /// This is why -1 is a reasonable choice for null data member
894 /// pointers.
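///
/// For example (illustrative, assuming the base subobject lives at offset 8
/// within the derived class): converting a derived data member pointer with
/// value 24 to the base member pointer type yields 24 - 8 = 16, while the
/// null value -1 is passed through unchanged by the null check below.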
895 llvm::Value *
896 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
897                                            const CastExpr *E,
898                                            llvm::Value *src) {
899   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
900          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
901          E->getCastKind() == CK_ReinterpretMemberPointer);
902 
903   // Under Itanium, reinterprets don't require any additional processing.
904   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
905 
906   // Use constant emission if we can.
907   if (isa<llvm::Constant>(src))
908     return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
909 
910   llvm::Constant *adj = getMemberPointerAdjustment(E);
911   if (!adj) return src;
912 
913   CGBuilderTy &Builder = CGF.Builder;
914   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
915 
916   const MemberPointerType *destTy =
917     E->getType()->castAs<MemberPointerType>();
918 
919   // For member data pointers, this is just a matter of adding the
920   // offset if the source is non-null.
921   if (destTy->isMemberDataPointer()) {
922     llvm::Value *dst;
923     if (isDerivedToBase)
924       dst = Builder.CreateNSWSub(src, adj, "adj");
925     else
926       dst = Builder.CreateNSWAdd(src, adj, "adj");
927 
928     // Null check.
929     llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
930     llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
931     return Builder.CreateSelect(isNull, src, dst);
932   }
933 
934   // The this-adjustment is left-shifted by 1 on ARM.
935   if (UseARMMethodPtrABI) {
936     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
937     offset <<= 1;
938     adj = llvm::ConstantInt::get(adj->getType(), offset);
939   }
940 
941   llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
942   llvm::Value *dstAdj;
943   if (isDerivedToBase)
944     dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
945   else
946     dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
947 
948   return Builder.CreateInsertValue(src, dstAdj, 1);
949 }
950 
951 llvm::Constant *
952 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
953                                            llvm::Constant *src) {
954   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
955          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
956          E->getCastKind() == CK_ReinterpretMemberPointer);
957 
958   // Under Itanium, reinterprets don't require any additional processing.
959   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
960 
961   // If the adjustment is trivial, we don't need to do anything.
962   llvm::Constant *adj = getMemberPointerAdjustment(E);
963   if (!adj) return src;
964 
965   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
966 
967   const MemberPointerType *destTy =
968     E->getType()->castAs<MemberPointerType>();
969 
970   // For member data pointers, this is just a matter of adding the
971   // offset if the source is non-null.
972   if (destTy->isMemberDataPointer()) {
973     // null maps to null.
974     if (src->isAllOnesValue()) return src;
975 
976     if (isDerivedToBase)
977       return llvm::ConstantExpr::getNSWSub(src, adj);
978     else
979       return llvm::ConstantExpr::getNSWAdd(src, adj);
980   }
981 
982   // The this-adjustment is left-shifted by 1 on ARM.
983   if (UseARMMethodPtrABI) {
984     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
985     offset <<= 1;
986     adj = llvm::ConstantInt::get(adj->getType(), offset);
987   }
988 
989   llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
990   llvm::Constant *dstAdj;
991   if (isDerivedToBase)
992     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
993   else
994     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
995 
996   return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
997 }
998 
999 llvm::Constant *
1000 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1001   // Itanium C++ ABI 2.3:
1002   //   A NULL pointer is represented as -1.
1003   if (MPT->isMemberDataPointer())
1004     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
1005 
1006   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1007   llvm::Constant *Values[2] = { Zero, Zero };
1008   return llvm::ConstantStruct::getAnon(Values);
1009 }
1010 
1011 llvm::Constant *
1012 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1013                                      CharUnits offset) {
1014   // Itanium C++ ABI 2.3:
1015   //   A pointer to data member is an offset from the base address of
1016   //   the class object containing it, represented as a ptrdiff_t
1017   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1018 }
1019 
1020 llvm::Constant *
1021 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1022   return BuildMemberPointer(MD, CharUnits::Zero());
1023 }
1024 
1025 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1026                                                   CharUnits ThisAdjustment) {
1027   assert(MD->isInstance() && "Member function must not be static!");
1028 
1029   CodeGenTypes &Types = CGM.getTypes();
1030 
1031   // Get the function pointer (or index if this is a virtual function).
1032   llvm::Constant *MemPtr[2];
1033   if (MD->isVirtual()) {
1034     uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1035     uint64_t VTableOffset;
1036     if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1037       // Multiply by 4-byte relative offsets.
1038       VTableOffset = Index * 4;
1039     } else {
1040       const ASTContext &Context = getContext();
1041       CharUnits PointerWidth = Context.toCharUnitsFromBits(
1042           Context.getTargetInfo().getPointerWidth(0));
1043       VTableOffset = Index * PointerWidth.getQuantity();
1044     }
1045 
1046     if (UseARMMethodPtrABI) {
1047       // ARM C++ ABI 3.2.1:
1048       //   This ABI specifies that adj contains twice the this
1049       //   adjustment, plus 1 if the member function is virtual. The
1050       //   least significant bit of adj then makes exactly the same
1051       //   discrimination as the least significant bit of ptr does for
1052       //   Itanium.
1053       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1054       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1055                                          2 * ThisAdjustment.getQuantity() + 1);
1056     } else {
1057       // Itanium C++ ABI 2.3:
1058       //   For a virtual function, [the pointer field] is 1 plus the
1059       //   virtual table offset (in bytes) of the function,
1060       //   represented as a ptrdiff_t.
1061       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1062       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1063                                          ThisAdjustment.getQuantity());
1064     }
1065   } else {
1066     const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1067     llvm::Type *Ty;
1068     // Check whether the function has a computable LLVM signature.
1069     if (Types.isFuncTypeConvertible(FPT)) {
1070       // The function has a computable LLVM signature; use the correct type.
1071       Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1072     } else {
1073       // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1074       // function type is incomplete.
1075       Ty = CGM.PtrDiffTy;
1076     }
1077     llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1078 
1079     MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1080     MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1081                                        (UseARMMethodPtrABI ? 2 : 1) *
1082                                        ThisAdjustment.getQuantity());
1083   }
1084 
1085   return llvm::ConstantStruct::getAnon(MemPtr);
1086 }
1087 
1088 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1089                                                  QualType MPType) {
1090   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1091   const ValueDecl *MPD = MP.getMemberPointerDecl();
1092   if (!MPD)
1093     return EmitNullMemberPointer(MPT);
1094 
1095   CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1096 
1097   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1098     return BuildMemberPointer(MD, ThisAdjustment);
1099 
1100   CharUnits FieldOffset =
1101     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1102   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1103 }
1104 
1105 /// The comparison algorithm is pretty easy: the member pointers are
1106 /// the same if they're either bitwise identical *or* both null.
1107 ///
1108 /// ARM is different here only because null-ness is more complicated.
1109 llvm::Value *
1110 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1111                                            llvm::Value *L,
1112                                            llvm::Value *R,
1113                                            const MemberPointerType *MPT,
1114                                            bool Inequality) {
1115   CGBuilderTy &Builder = CGF.Builder;
1116 
1117   llvm::ICmpInst::Predicate Eq;
1118   llvm::Instruction::BinaryOps And, Or;
1119   if (Inequality) {
1120     Eq = llvm::ICmpInst::ICMP_NE;
1121     And = llvm::Instruction::Or;
1122     Or = llvm::Instruction::And;
1123   } else {
1124     Eq = llvm::ICmpInst::ICMP_EQ;
1125     And = llvm::Instruction::And;
1126     Or = llvm::Instruction::Or;
1127   }
1128 
1129   // Member data pointers are easy because there's a unique null
1130   // value, so it just comes down to bitwise equality.
1131   if (MPT->isMemberDataPointer())
1132     return Builder.CreateICmp(Eq, L, R);
1133 
1134   // For member function pointers, the tautologies are more complex.
1135   // The Itanium tautology is:
1136   //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1137   // The ARM tautology is:
1138   //   (L == R) <==> (L.ptr == R.ptr &&
1139   //                  (L.adj == R.adj ||
1140   //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1141   // The inequality tautologies have exactly the same structure, except
1142   // applying De Morgan's laws.
1143 
1144   llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1145   llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1146 
1147   // This condition tests whether L.ptr == R.ptr.  This must always be
1148   // true for equality to hold.
1149   llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1150 
1151   // This condition, together with the assumption that L.ptr == R.ptr,
1152   // tests whether the pointers are both null.  ARM imposes an extra
1153   // condition.
1154   llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1155   llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1156 
1157   // This condition tests whether L.adj == R.adj.  If this isn't
1158   // true, the pointers are unequal unless they're both null.
1159   llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1160   llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1161   llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1162 
1163   // Null member function pointers on ARM clear the low bit of Adj,
1164   // so the zero condition has to check that neither low bit is set.
1165   if (UseARMMethodPtrABI) {
1166     llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1167 
1168     // Compute (l.adj | r.adj) & 1 and test it against zero.
1169     llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1170     llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1171     llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1172                                                       "cmp.or.adj");
1173     EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1174   }
1175 
1176   // Tie together all our conditions.
1177   llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1178   Result = Builder.CreateBinOp(And, PtrEq, Result,
1179                                Inequality ? "memptr.ne" : "memptr.eq");
1180   return Result;
1181 }
1182 
1183 llvm::Value *
1184 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1185                                           llvm::Value *MemPtr,
1186                                           const MemberPointerType *MPT) {
1187   CGBuilderTy &Builder = CGF.Builder;
1188 
1189   /// For member data pointers, this is just a check against -1.
1190   if (MPT->isMemberDataPointer()) {
1191     assert(MemPtr->getType() == CGM.PtrDiffTy);
1192     llvm::Value *NegativeOne =
1193       llvm::Constant::getAllOnesValue(MemPtr->getType());
1194     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1195   }
1196 
1197   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1198   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1199 
1200   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1201   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1202 
1203   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1204   // (the virtual bit) is set.
1205   if (UseARMMethodPtrABI) {
1206     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1207     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1208     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1209     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1210                                                   "memptr.isvirtual");
1211     Result = Builder.CreateOr(Result, IsVirtual);
1212   }
1213 
1214   return Result;
1215 }
1216 
1217 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1218   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1219   if (!RD)
1220     return false;
1221 
1222   // If C++ prohibits us from making a copy, return by address.
1223   if (!RD->canPassInRegisters()) {
1224     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1225     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1226     return true;
1227   }
1228   return false;
1229 }
1230 
1231 /// The Itanium ABI requires non-zero initialization only for data
1232 /// member pointers, for which '0' is a valid offset.
isZeroInitializable(const MemberPointerType * MPT)1233 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1234   return MPT->isMemberFunctionPointer();
1235 }
1236 
1237 /// The Itanium ABI always places an offset to the complete object
1238 /// at entry -2 in the vtable.
emitVirtualObjectDelete(CodeGenFunction & CGF,const CXXDeleteExpr * DE,Address Ptr,QualType ElementType,const CXXDestructorDecl * Dtor)1239 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1240                                             const CXXDeleteExpr *DE,
1241                                             Address Ptr,
1242                                             QualType ElementType,
1243                                             const CXXDestructorDecl *Dtor) {
1244   bool UseGlobalDelete = DE->isGlobalDelete();
1245   if (UseGlobalDelete) {
1246     // Derive the complete-object pointer, which is what we need
1247     // to pass to the deallocation function.
1248 
1249     // Grab the vtable pointer as an intptr_t*.
1250     auto *ClassDecl =
1251         cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1252     llvm::Value *VTable =
1253         CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1254 
1255     // Track back to entry -2 and pull out the offset there.
1256     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1257         VTable, -2, "complete-offset.ptr");
1258     llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,                                                        CGF.getPointerAlign());
1259 
1260     // Apply the offset.
1261     llvm::Value *CompletePtr =
1262       CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1263     CompletePtr =
1264         CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1265 
1266     // If we're supposed to call the global delete, make sure we do so
1267     // even if the destructor throws.
1268     CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1269                                     ElementType);
1270   }
1271 
1272   // FIXME: Provide a source location here even though there's no
1273   // CXXMemberCallExpr for dtor call.
1274   CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1275   EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1276 
1277   if (UseGlobalDelete)
1278     CGF.PopCleanupBlock();
1279 }
1280 
emitRethrow(CodeGenFunction & CGF,bool isNoReturn)1281 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1282   // void __cxa_rethrow();
1283 
1284   llvm::FunctionType *FTy =
1285     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1286 
1287   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1288 
1289   if (isNoReturn)
1290     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1291   else
1292     CGF.EmitRuntimeCallOrInvoke(Fn);
1293 }
1294 
getAllocateExceptionFn(CodeGenModule & CGM)1295 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1296   // void *__cxa_allocate_exception(size_t thrown_size);
1297 
1298   llvm::FunctionType *FTy =
1299     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1300 
1301   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1302 }
1303 
getThrowFn(CodeGenModule & CGM)1304 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1305   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1306   //                  void (*dest) (void *));
1307 
1308   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1309   llvm::FunctionType *FTy =
1310     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1311 
1312   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1313 }
1314 
emitThrow(CodeGenFunction & CGF,const CXXThrowExpr * E)1315 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1316   QualType ThrowType = E->getSubExpr()->getType();
1317   // Now allocate the exception object.
1318   llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1319   uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1320 
1321   llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1322   llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1323       AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1324 
1325   CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1326   CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1327 
1328   // Now throw the exception.
1329   llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1330                                                          /*ForEH=*/true);
1331 
1332   // The address of the destructor.  If the exception type has a
1333   // trivial destructor (or isn't a record), we just pass null.
1334   llvm::Constant *Dtor = nullptr;
1335   if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1336     CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1337     if (!Record->hasTrivialDestructor()) {
1338       CXXDestructorDecl *DtorD = Record->getDestructor();
1339       Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1340       Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1341     }
1342   }
1343   if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1344 
1345   llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1346   CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1347 }
1348 
getItaniumDynamicCastFn(CodeGenFunction & CGF)1349 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1350   // void *__dynamic_cast(const void *sub,
1351   //                      const abi::__class_type_info *src,
1352   //                      const abi::__class_type_info *dst,
1353   //                      std::ptrdiff_t src2dst_offset);
1354 
1355   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1356   llvm::Type *PtrDiffTy =
1357     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1358 
1359   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1360 
1361   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1362 
1363   // Mark the function as nounwind readonly.
1364   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1365                                             llvm::Attribute::ReadOnly };
1366   llvm::AttributeList Attrs = llvm::AttributeList::get(
1367       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1368 
1369   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1370 }
1371 
getBadCastFn(CodeGenFunction & CGF)1372 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1373   // void __cxa_bad_cast();
1374   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1375   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1376 }
1377 
1378 /// Compute the src2dst_offset hint as described in the
1379 /// Itanium C++ ABI [2.9.7]
computeOffsetHint(ASTContext & Context,const CXXRecordDecl * Src,const CXXRecordDecl * Dst)1380 static CharUnits computeOffsetHint(ASTContext &Context,
1381                                    const CXXRecordDecl *Src,
1382                                    const CXXRecordDecl *Dst) {
1383   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1384                      /*DetectVirtual=*/false);
1385 
1386   // If Dst is not derived from Src we can skip the whole computation below and
1387   // return that Src is not a public base of Dst.  Record all inheritance paths.
1388   if (!Dst->isDerivedFrom(Src, Paths))
1389     return CharUnits::fromQuantity(-2ULL);
1390 
1391   unsigned NumPublicPaths = 0;
1392   CharUnits Offset;
1393 
1394   // Now walk all possible inheritance paths.
1395   for (const CXXBasePath &Path : Paths) {
1396     if (Path.Access != AS_public)  // Ignore non-public inheritance.
1397       continue;
1398 
1399     ++NumPublicPaths;
1400 
1401     for (const CXXBasePathElement &PathElement : Path) {
1402       // If the path contains a virtual base class we can't give any hint.
1403       // -1: no hint.
1404       if (PathElement.Base->isVirtual())
1405         return CharUnits::fromQuantity(-1ULL);
1406 
1407       if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1408         continue;
1409 
1410       // Accumulate the base class offsets.
1411       const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1412       Offset += L.getBaseClassOffset(
1413           PathElement.Base->getType()->getAsCXXRecordDecl());
1414     }
1415   }
1416 
1417   // -2: Src is not a public base of Dst.
1418   if (NumPublicPaths == 0)
1419     return CharUnits::fromQuantity(-2ULL);
1420 
1421   // -3: Src is a multiple public base type but never a virtual base type.
1422   if (NumPublicPaths > 1)
1423     return CharUnits::fromQuantity(-3ULL);
1424 
1425   // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1426   // Return the offset of Src from the origin of Dst.
1427   return Offset;
1428 }
1429 
getBadTypeidFn(CodeGenFunction & CGF)1430 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1431   // void __cxa_bad_typeid();
1432   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1433 
1434   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1435 }
1436 
shouldTypeidBeNullChecked(bool IsDeref,QualType SrcRecordTy)1437 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1438                                               QualType SrcRecordTy) {
1439   return IsDeref;
1440 }
1441 
EmitBadTypeidCall(CodeGenFunction & CGF)1442 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1443   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1444   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1445   Call->setDoesNotReturn();
1446   CGF.Builder.CreateUnreachable();
1447 }
1448 
EmitTypeid(CodeGenFunction & CGF,QualType SrcRecordTy,Address ThisPtr,llvm::Type * StdTypeInfoPtrTy)1449 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1450                                        QualType SrcRecordTy,
1451                                        Address ThisPtr,
1452                                        llvm::Type *StdTypeInfoPtrTy) {
1453   auto *ClassDecl =
1454       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1455   llvm::Value *Value =
1456       CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1457 
1458   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1459     // Load the type info.
1460     Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
1461     Value = CGF.Builder.CreateCall(
1462         CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1463         {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1464 
1465     // Setup to dereference again since this is a proxy we accessed.
1466     Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
1467   } else {
1468     // Load the type info.
1469     Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1470   }
1471   return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1472                                        CGF.getPointerAlign());
1473 }
1474 
shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,QualType SrcRecordTy)1475 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1476                                                        QualType SrcRecordTy) {
1477   return SrcIsPtr;
1478 }
1479 
EmitDynamicCastCall(CodeGenFunction & CGF,Address ThisAddr,QualType SrcRecordTy,QualType DestTy,QualType DestRecordTy,llvm::BasicBlock * CastEnd)1480 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1481     CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1482     QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1483   llvm::Type *PtrDiffLTy =
1484       CGF.ConvertType(CGF.getContext().getPointerDiffType());
1485   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1486 
1487   llvm::Value *SrcRTTI =
1488       CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1489   llvm::Value *DestRTTI =
1490       CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1491 
1492   // Compute the offset hint.
1493   const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1494   const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1495   llvm::Value *OffsetHint = llvm::ConstantInt::get(
1496       PtrDiffLTy,
1497       computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1498 
1499   // Emit the call to __dynamic_cast.
1500   llvm::Value *Value = ThisAddr.getPointer();
1501   Value = CGF.EmitCastToVoidPtr(Value);
1502 
1503   llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1504   Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1505   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1506 
1507   /// C++ [expr.dynamic.cast]p9:
1508   ///   A failed cast to reference type throws std::bad_cast
1509   if (DestTy->isReferenceType()) {
1510     llvm::BasicBlock *BadCastBlock =
1511         CGF.createBasicBlock("dynamic_cast.bad_cast");
1512 
1513     llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1514     CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1515 
1516     CGF.EmitBlock(BadCastBlock);
1517     EmitBadCastCall(CGF);
1518   }
1519 
1520   return Value;
1521 }
1522 
EmitDynamicCastToVoid(CodeGenFunction & CGF,Address ThisAddr,QualType SrcRecordTy,QualType DestTy)1523 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1524                                                   Address ThisAddr,
1525                                                   QualType SrcRecordTy,
1526                                                   QualType DestTy) {
1527   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1528   auto *ClassDecl =
1529       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1530   llvm::Value *OffsetToTop;
1531   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1532     // Get the vtable pointer.
1533     llvm::Value *VTable =
1534         CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
1535 
1536     // Get the offset-to-top from the vtable.
1537     OffsetToTop =
1538         CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
1539     OffsetToTop = CGF.Builder.CreateAlignedLoad(
1540         CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1541   } else {
1542     llvm::Type *PtrDiffLTy =
1543         CGF.ConvertType(CGF.getContext().getPointerDiffType());
1544 
1545     // Get the vtable pointer.
1546     llvm::Value *VTable =
1547         CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
1548 
1549     // Get the offset-to-top from the vtable.
1550     OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1551     OffsetToTop = CGF.Builder.CreateAlignedLoad(
1552         PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1553   }
1554   // Finally, add the offset to the pointer.
1555   llvm::Value *Value = ThisAddr.getPointer();
1556   Value = CGF.EmitCastToVoidPtr(Value);
1557   Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
1558   return CGF.Builder.CreateBitCast(Value, DestLTy);
1559 }
1560 
EmitBadCastCall(CodeGenFunction & CGF)1561 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1562   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1563   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1564   Call->setDoesNotReturn();
1565   CGF.Builder.CreateUnreachable();
1566   return true;
1567 }
1568 
1569 llvm::Value *
GetVirtualBaseClassOffset(CodeGenFunction & CGF,Address This,const CXXRecordDecl * ClassDecl,const CXXRecordDecl * BaseClassDecl)1570 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1571                                          Address This,
1572                                          const CXXRecordDecl *ClassDecl,
1573                                          const CXXRecordDecl *BaseClassDecl) {
1574   llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1575   CharUnits VBaseOffsetOffset =
1576       CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1577                                                                BaseClassDecl);
1578   llvm::Value *VBaseOffsetPtr =
1579     CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1580                                    "vbase.offset.ptr");
1581 
1582   llvm::Value *VBaseOffset;
1583   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1584     VBaseOffsetPtr =
1585         CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
1586     VBaseOffset = CGF.Builder.CreateAlignedLoad(
1587         CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1588         "vbase.offset");
1589   } else {
1590     VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1591                                                CGM.PtrDiffTy->getPointerTo());
1592     VBaseOffset = CGF.Builder.CreateAlignedLoad(
1593         CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1594   }
1595   return VBaseOffset;
1596 }
1597 
EmitCXXConstructors(const CXXConstructorDecl * D)1598 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1599   // Just make sure we're in sync with TargetCXXABI.
1600   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1601 
1602   // The constructor used for constructing this as a base class;
1603   // ignores virtual bases.
1604   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1605 
1606   // The constructor used for constructing this as a complete class;
1607   // constructs the virtual bases, then calls the base constructor.
1608   if (!D->getParent()->isAbstract()) {
1609     // We don't need to emit the complete ctor if the class is abstract.
1610     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1611   }
1612 }
1613 
1614 CGCXXABI::AddedStructorArgCounts
buildStructorSignature(GlobalDecl GD,SmallVectorImpl<CanQualType> & ArgTys)1615 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1616                                       SmallVectorImpl<CanQualType> &ArgTys) {
1617   ASTContext &Context = getContext();
1618 
1619   // All parameters are already in place except VTT, which goes after 'this'.
1620   // These are Clang types, so we don't need to worry about sret yet.
1621 
1622   // Check if we need to add a VTT parameter (which has type void **).
1623   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1624                                              : GD.getDtorType() == Dtor_Base) &&
1625       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1626     ArgTys.insert(ArgTys.begin() + 1,
1627                   Context.getPointerType(Context.VoidPtrTy));
1628     return AddedStructorArgCounts::prefix(1);
1629   }
1630   return AddedStructorArgCounts{};
1631 }
1632 
EmitCXXDestructors(const CXXDestructorDecl * D)1633 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1634   // The destructor used for destructing this as a base class; ignores
1635   // virtual bases.
1636   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1637 
1638   // The destructor used for destructing this as a most-derived class;
1639   // call the base destructor and then destructs any virtual bases.
1640   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1641 
1642   // The destructor in a virtual table is always a 'deleting'
1643   // destructor, which calls the complete destructor and then uses the
1644   // appropriate operator delete.
1645   if (D->isVirtual())
1646     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1647 }
1648 
addImplicitStructorParams(CodeGenFunction & CGF,QualType & ResTy,FunctionArgList & Params)1649 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1650                                               QualType &ResTy,
1651                                               FunctionArgList &Params) {
1652   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1653   assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1654 
1655   // Check if we need a VTT parameter as well.
1656   if (NeedsVTTParameter(CGF.CurGD)) {
1657     ASTContext &Context = getContext();
1658 
1659     // FIXME: avoid the fake decl
1660     QualType T = Context.getPointerType(Context.VoidPtrTy);
1661     auto *VTTDecl = ImplicitParamDecl::Create(
1662         Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1663         T, ImplicitParamDecl::CXXVTT);
1664     Params.insert(Params.begin() + 1, VTTDecl);
1665     getStructorImplicitParamDecl(CGF) = VTTDecl;
1666   }
1667 }
1668 
EmitInstanceFunctionProlog(CodeGenFunction & CGF)1669 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1670   // Naked functions have no prolog.
1671   if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1672     return;
1673 
1674   /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1675   /// adjustments are required, because they are all handled by thunks.
1676   setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1677 
1678   /// Initialize the 'vtt' slot if needed.
1679   if (getStructorImplicitParamDecl(CGF)) {
1680     getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1681         CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1682   }
1683 
1684   /// If this is a function that the ABI specifies returns 'this', initialize
1685   /// the return slot to 'this' at the start of the function.
1686   ///
1687   /// Unlike the setting of return types, this is done within the ABI
1688   /// implementation instead of by clients of CGCXXABI because:
1689   /// 1) getThisValue is currently protected
1690   /// 2) in theory, an ABI could implement 'this' returns some other way;
1691   ///    HasThisReturn only specifies a contract, not the implementation
1692   if (HasThisReturn(CGF.CurGD))
1693     CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1694 }
1695 
getImplicitConstructorArgs(CodeGenFunction & CGF,const CXXConstructorDecl * D,CXXCtorType Type,bool ForVirtualBase,bool Delegating)1696 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1697     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1698     bool ForVirtualBase, bool Delegating) {
1699   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1700     return AddedStructorArgs{};
1701 
1702   // Insert the implicit 'vtt' argument as the second argument.
1703   llvm::Value *VTT =
1704       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1705   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1706   return AddedStructorArgs::prefix({{VTT, VTTTy}});
1707 }
1708 
getCXXDestructorImplicitParam(CodeGenFunction & CGF,const CXXDestructorDecl * DD,CXXDtorType Type,bool ForVirtualBase,bool Delegating)1709 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1710     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1711     bool ForVirtualBase, bool Delegating) {
1712   GlobalDecl GD(DD, Type);
1713   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1714 }
1715 
EmitDestructorCall(CodeGenFunction & CGF,const CXXDestructorDecl * DD,CXXDtorType Type,bool ForVirtualBase,bool Delegating,Address This,QualType ThisTy)1716 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1717                                        const CXXDestructorDecl *DD,
1718                                        CXXDtorType Type, bool ForVirtualBase,
1719                                        bool Delegating, Address This,
1720                                        QualType ThisTy) {
1721   GlobalDecl GD(DD, Type);
1722   llvm::Value *VTT =
1723       getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1724   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1725 
1726   CGCallee Callee;
1727   if (getContext().getLangOpts().AppleKext &&
1728       Type != Dtor_Base && DD->isVirtual())
1729     Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1730   else
1731     Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1732 
1733   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1734                             nullptr);
1735 }
1736 
emitVTableDefinitions(CodeGenVTables & CGVT,const CXXRecordDecl * RD)1737 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1738                                           const CXXRecordDecl *RD) {
1739   llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1740   if (VTable->hasInitializer())
1741     return;
1742 
1743   ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1744   const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1745   llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1746   llvm::Constant *RTTI =
1747       CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1748 
1749   // Create and set the initializer.
1750   ConstantInitBuilder builder(CGM);
1751   auto components = builder.beginStruct();
1752   CGVT.createVTableInitializer(components, VTLayout, RTTI,
1753                                llvm::GlobalValue::isLocalLinkage(Linkage));
1754   components.finishAndSetAsInitializer(VTable);
1755 
1756   // Set the correct linkage.
1757   VTable->setLinkage(Linkage);
1758 
1759   if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1760     VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1761 
1762   // Set the right visibility.
1763   CGM.setGVProperties(VTable, RD);
1764 
1765   // If this is the magic class __cxxabiv1::__fundamental_type_info,
1766   // we will emit the typeinfo for the fundamental types. This is the
1767   // same behaviour as GCC.
1768   const DeclContext *DC = RD->getDeclContext();
1769   if (RD->getIdentifier() &&
1770       RD->getIdentifier()->isStr("__fundamental_type_info") &&
1771       isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1772       cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1773       DC->getParent()->isTranslationUnit())
1774     EmitFundamentalRTTIDescriptors(RD);
1775 
1776   // Always emit type metadata on non-available_externally definitions, and on
1777   // available_externally definitions if we are performing whole program
1778   // devirtualization. For WPD we need the type metadata on all vtable
1779   // definitions to ensure we associate derived classes with base classes
1780   // defined in headers but with a strong definition only in a shared library.
1781   if (!VTable->isDeclarationForLinker() ||
1782       CGM.getCodeGenOpts().WholeProgramVTables) {
1783     CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1784     // For available_externally definitions, add the vtable to
1785     // @llvm.compiler.used so that it isn't deleted before whole program
1786     // analysis.
1787     if (VTable->isDeclarationForLinker()) {
1788       assert(CGM.getCodeGenOpts().WholeProgramVTables);
1789       CGM.addCompilerUsedGlobal(VTable);
1790     }
1791   }
1792 
1793   if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
1794     CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1795 }
1796 
isVirtualOffsetNeededForVTableField(CodeGenFunction & CGF,CodeGenFunction::VPtr Vptr)1797 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1798     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1799   if (Vptr.NearestVBase == nullptr)
1800     return false;
1801   return NeedsVTTParameter(CGF.CurGD);
1802 }
1803 
getVTableAddressPointInStructor(CodeGenFunction & CGF,const CXXRecordDecl * VTableClass,BaseSubobject Base,const CXXRecordDecl * NearestVBase)1804 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1805     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1806     const CXXRecordDecl *NearestVBase) {
1807 
1808   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1809       NeedsVTTParameter(CGF.CurGD)) {
1810     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1811                                                   NearestVBase);
1812   }
1813   return getVTableAddressPoint(Base, VTableClass);
1814 }
1815 
1816 llvm::Constant *
getVTableAddressPoint(BaseSubobject Base,const CXXRecordDecl * VTableClass)1817 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1818                                      const CXXRecordDecl *VTableClass) {
1819   llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1820 
1821   // Find the appropriate vtable within the vtable group, and the address point
1822   // within that vtable.
1823   VTableLayout::AddressPointLocation AddressPoint =
1824       CGM.getItaniumVTableContext()
1825           .getVTableLayout(VTableClass)
1826           .getAddressPoint(Base);
1827   llvm::Value *Indices[] = {
1828     llvm::ConstantInt::get(CGM.Int32Ty, 0),
1829     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1830     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1831   };
1832 
1833   return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1834                                               Indices, /*InBounds=*/true,
1835                                               /*InRangeIndex=*/1);
1836 }
1837 
1838 // Check whether all the non-inline virtual methods for the class have the
1839 // specified attribute.
1840 template <typename T>
CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl * RD)1841 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1842   bool FoundNonInlineVirtualMethodWithAttr = false;
1843   for (const auto *D : RD->noload_decls()) {
1844     if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1845       if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1846           FD->doesThisDeclarationHaveABody())
1847         continue;
1848       if (!D->hasAttr<T>())
1849         return false;
1850       FoundNonInlineVirtualMethodWithAttr = true;
1851     }
1852   }
1853 
1854   // We didn't find any non-inline virtual methods missing the attribute.  We
1855   // will return true when we found at least one non-inline virtual with the
1856   // attribute.  (This lets our caller know that the attribute needs to be
1857   // propagated up to the vtable.)
1858   return FoundNonInlineVirtualMethodWithAttr;
1859 }
1860 
getVTableAddressPointInStructorWithVTT(CodeGenFunction & CGF,const CXXRecordDecl * VTableClass,BaseSubobject Base,const CXXRecordDecl * NearestVBase)1861 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1862     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1863     const CXXRecordDecl *NearestVBase) {
1864   assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1865          NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1866 
1867   // Get the secondary vpointer index.
1868   uint64_t VirtualPointerIndex =
1869       CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1870 
1871   /// Load the VTT.
1872   llvm::Value *VTT = CGF.LoadCXXVTT();
1873   if (VirtualPointerIndex)
1874     VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1875 
1876   // And load the address point from the VTT.
1877   return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
1878                                        CGF.getPointerAlign());
1879 }
1880 
getVTableAddressPointForConstExpr(BaseSubobject Base,const CXXRecordDecl * VTableClass)1881 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1882     BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1883   return getVTableAddressPoint(Base, VTableClass);
1884 }
1885 
getAddrOfVTable(const CXXRecordDecl * RD,CharUnits VPtrOffset)1886 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1887                                                      CharUnits VPtrOffset) {
1888   assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1889 
1890   llvm::GlobalVariable *&VTable = VTables[RD];
1891   if (VTable)
1892     return VTable;
1893 
1894   // Queue up this vtable for possible deferred emission.
1895   CGM.addDeferredVTable(RD);
1896 
1897   SmallString<256> Name;
1898   llvm::raw_svector_ostream Out(Name);
1899   getMangleContext().mangleCXXVTable(RD, Out);
1900 
1901   const VTableLayout &VTLayout =
1902       CGM.getItaniumVTableContext().getVTableLayout(RD);
1903   llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1904 
1905   // Use pointer alignment for the vtable. Otherwise we would align them based
1906   // on the size of the initializer which doesn't make sense as only single
1907   // values are read.
1908   unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1909                         ? 32
1910                         : CGM.getTarget().getPointerAlign(0);
1911 
1912   VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1913       Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1914       getContext().toCharUnitsFromBits(PAlign).getQuantity());
1915   VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1916 
1917   // In MS C++ if you have a class with virtual functions in which you are using
1918   // selective member import/export, then all virtual functions must be exported
1919   // unless they are inline, otherwise a link error will result. To match this
1920   // behavior, for such classes, we dllimport the vtable if it is defined
1921   // externally and all the non-inline virtual methods are marked dllimport, and
1922   // we dllexport the vtable if it is defined in this TU and all the non-inline
1923   // virtual methods are marked dllexport.
1924   if (CGM.getTarget().hasPS4DLLImportExport()) {
1925     if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
1926       if (CGM.getVTables().isVTableExternal(RD)) {
1927         if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
1928           VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1929       } else {
1930         if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
1931           VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1932       }
1933     }
1934   }
1935   CGM.setGVProperties(VTable, RD);
1936 
1937   return VTable;
1938 }
1939 
getVirtualFunctionPointer(CodeGenFunction & CGF,GlobalDecl GD,Address This,llvm::Type * Ty,SourceLocation Loc)1940 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1941                                                   GlobalDecl GD,
1942                                                   Address This,
1943                                                   llvm::Type *Ty,
1944                                                   SourceLocation Loc) {
1945   auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1946   llvm::Value *VTable = CGF.GetVTablePtr(
1947       This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());
1948 
1949   uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1950   llvm::Value *VFunc;
1951   if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1952     VFunc = CGF.EmitVTableTypeCheckedLoad(
1953         MethodDecl->getParent(), VTable,
1954         VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1955   } else {
1956     CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1957 
1958     llvm::Value *VFuncLoad;
1959     if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1960       VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
1961       llvm::Value *Load = CGF.Builder.CreateCall(
1962           CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1963           {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
1964       VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
1965     } else {
1966       VTable =
1967           CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
1968       llvm::Value *VTableSlotPtr =
1969           CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1970       VFuncLoad =
1971           CGF.Builder.CreateAlignedLoad(Ty->getPointerTo(), VTableSlotPtr,
1972                                         CGF.getPointerAlign());
1973     }
1974 
1975     // Add !invariant.load md to virtual function load to indicate that
1976     // function didn't change inside vtable.
1977     // It's safe to add it without -fstrict-vtable-pointers, but it would not
1978     // help in devirtualization because it will only matter if we will have 2
1979     // the same virtual function loads from the same vtable load, which won't
1980     // happen without enabled devirtualization with -fstrict-vtable-pointers.
1981     if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1982         CGM.getCodeGenOpts().StrictVTablePointers) {
1983       if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
1984         VFuncLoadInstr->setMetadata(
1985             llvm::LLVMContext::MD_invariant_load,
1986             llvm::MDNode::get(CGM.getLLVMContext(),
1987                               llvm::ArrayRef<llvm::Metadata *>()));
1988       }
1989     }
1990     VFunc = VFuncLoad;
1991   }
1992 
1993   CGCallee Callee(GD, VFunc);
1994   return Callee;
1995 }
1996 
EmitVirtualDestructorCall(CodeGenFunction & CGF,const CXXDestructorDecl * Dtor,CXXDtorType DtorType,Address This,DeleteOrMemberCallExpr E)1997 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1998     CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1999     Address This, DeleteOrMemberCallExpr E) {
2000   auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2001   auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2002   assert((CE != nullptr) ^ (D != nullptr));
2003   assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2004   assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2005 
2006   GlobalDecl GD(Dtor, DtorType);
2007   const CGFunctionInfo *FInfo =
2008       &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2009   llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2010   CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2011 
2012   QualType ThisTy;
2013   if (CE) {
2014     ThisTy = CE->getObjectType();
2015   } else {
2016     ThisTy = D->getDestroyedType();
2017   }
2018 
2019   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2020                             QualType(), nullptr);
2021   return nullptr;
2022 }
2023 
emitVirtualInheritanceTables(const CXXRecordDecl * RD)2024 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2025   CodeGenVTables &VTables = CGM.getVTables();
2026   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2027   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2028 }
2029 
canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl * RD) const2030 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2031     const CXXRecordDecl *RD) const {
2032   // We don't emit available_externally vtables if we are in -fapple-kext mode
2033   // because kext mode does not permit devirtualization.
2034   if (CGM.getLangOpts().AppleKext)
2035     return false;
2036 
2037   // If the vtable is hidden then it is not safe to emit an available_externally
2038   // copy of vtable.
2039   if (isVTableHidden(RD))
2040     return false;
2041 
2042   if (CGM.getCodeGenOpts().ForceEmitVTables)
2043     return true;
2044 
2045   // If we don't have any not emitted inline virtual function then we are safe
2046   // to emit an available_externally copy of vtable.
2047   // FIXME we can still emit a copy of the vtable if we
2048   // can emit definition of the inline functions.
2049   if (hasAnyUnusedVirtualInlineFunction(RD))
2050     return false;
2051 
2052   // For a class with virtual bases, we must also be able to speculatively
2053   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2054   // the vtable" and "can emit the VTT". For a base subobject, this means we
2055   // need to be able to emit non-virtual base vtables.
2056   if (RD->getNumVBases()) {
2057     for (const auto &B : RD->bases()) {
2058       auto *BRD = B.getType()->getAsCXXRecordDecl();
2059       assert(BRD && "no class for base specifier");
2060       if (B.isVirtual() || !BRD->isDynamicClass())
2061         continue;
2062       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2063         return false;
2064     }
2065   }
2066 
2067   return true;
2068 }
2069 
canSpeculativelyEmitVTable(const CXXRecordDecl * RD) const2070 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2071   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2072     return false;
2073 
2074   // For a complete-object vtable (or more specifically, for the VTT), we need
2075   // to be able to speculatively emit the vtables of all dynamic virtual bases.
2076   for (const auto &B : RD->vbases()) {
2077     auto *BRD = B.getType()->getAsCXXRecordDecl();
2078     assert(BRD && "no class for base specifier");
2079     if (!BRD->isDynamicClass())
2080       continue;
2081     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2082       return false;
2083   }
2084 
2085   return true;
2086 }
performTypeAdjustment(CodeGenFunction & CGF,Address InitialPtr,int64_t NonVirtualAdjustment,int64_t VirtualAdjustment,bool IsReturnAdjustment)2087 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2088                                           Address InitialPtr,
2089                                           int64_t NonVirtualAdjustment,
2090                                           int64_t VirtualAdjustment,
2091                                           bool IsReturnAdjustment) {
2092   if (!NonVirtualAdjustment && !VirtualAdjustment)
2093     return InitialPtr.getPointer();
2094 
2095   Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2096 
2097   // In a base-to-derived cast, the non-virtual adjustment is applied first.
2098   if (NonVirtualAdjustment && !IsReturnAdjustment) {
2099     V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2100                               CharUnits::fromQuantity(NonVirtualAdjustment));
2101   }
2102 
2103   // Perform the virtual adjustment if we have one.
2104   llvm::Value *ResultPtr;
2105   if (VirtualAdjustment) {
2106     Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2107     llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2108 
2109     llvm::Value *Offset;
2110     llvm::Value *OffsetPtr =
2111         CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
2112     if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2113       // Load the adjustment offset from the vtable as a 32-bit int.
2114       OffsetPtr =
2115           CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
2116       Offset =
2117           CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
2118                                         CharUnits::fromQuantity(4));
2119     } else {
2120       llvm::Type *PtrDiffTy =
2121           CGF.ConvertType(CGF.getContext().getPointerDiffType());
2122 
2123       OffsetPtr =
2124           CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
2125 
2126       // Load the adjustment offset from the vtable.
2127       Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
2128                                              CGF.getPointerAlign());
2129     }
2130     // Adjust our pointer.
2131     ResultPtr = CGF.Builder.CreateInBoundsGEP(
2132         V.getElementType(), V.getPointer(), Offset);
2133   } else {
2134     ResultPtr = V.getPointer();
2135   }
2136 
2137   // In a derived-to-base conversion, the non-virtual adjustment is
2138   // applied second.
2139   if (NonVirtualAdjustment && IsReturnAdjustment) {
2140     ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
2141                                                        NonVirtualAdjustment);
2142   }
2143 
2144   // Cast back to the original type.
2145   return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
2146 }
2147 
performThisAdjustment(CodeGenFunction & CGF,Address This,const ThisAdjustment & TA)2148 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2149                                                   Address This,
2150                                                   const ThisAdjustment &TA) {
2151   return performTypeAdjustment(CGF, This, TA.NonVirtual,
2152                                TA.Virtual.Itanium.VCallOffsetOffset,
2153                                /*IsReturnAdjustment=*/false);
2154 }
2155 
2156 llvm::Value *
performReturnAdjustment(CodeGenFunction & CGF,Address Ret,const ReturnAdjustment & RA)2157 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2158                                        const ReturnAdjustment &RA) {
2159   return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2160                                RA.Virtual.Itanium.VBaseOffsetOffset,
2161                                /*IsReturnAdjustment=*/true);
2162 }
2163 
EmitReturnFromThunk(CodeGenFunction & CGF,RValue RV,QualType ResultType)2164 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2165                                     RValue RV, QualType ResultType) {
2166   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2167     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2168 
2169   // Destructor thunks in the ARM ABI have indeterminate results.
2170   llvm::Type *T = CGF.ReturnValue.getElementType();
2171   RValue Undef = RValue::get(llvm::UndefValue::get(T));
2172   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2173 }
2174 
2175 /************************** Array allocation cookies **************************/
2176 
getArrayCookieSizeImpl(QualType elementType)2177 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2178   // The array cookie is a size_t; pad that up to the element alignment.
2179   // The cookie is actually right-justified in that space.
2180   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2181                   CGM.getContext().getPreferredTypeAlignInChars(elementType));
2182 }
2183 
InitializeArrayCookie(CodeGenFunction & CGF,Address NewPtr,llvm::Value * NumElements,const CXXNewExpr * expr,QualType ElementType)2184 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2185                                              Address NewPtr,
2186                                              llvm::Value *NumElements,
2187                                              const CXXNewExpr *expr,
2188                                              QualType ElementType) {
2189   assert(requiresArrayCookie(expr));
2190 
2191   unsigned AS = NewPtr.getAddressSpace();
2192 
2193   ASTContext &Ctx = getContext();
2194   CharUnits SizeSize = CGF.getSizeSize();
2195 
2196   // The size of the cookie.
2197   CharUnits CookieSize =
2198       std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2199   assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2200 
2201   // Compute an offset to the cookie.
2202   Address CookiePtr = NewPtr;
2203   CharUnits CookieOffset = CookieSize - SizeSize;
2204   if (!CookieOffset.isZero())
2205     CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2206 
2207   // Write the number of elements into the appropriate slot.
2208   Address NumElementsPtr =
2209       CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2210   llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2211 
2212   // Handle the array cookie specially in ASan.
2213   if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2214       (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2215        CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2216     // The store to the CookiePtr does not need to be instrumented.
2217     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2218     llvm::FunctionType *FTy =
2219         llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2220     llvm::FunctionCallee F =
2221         CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2222     CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2223   }
2224 
2225   // Finally, compute a pointer to the actual data buffer by skipping
2226   // over the cookie completely.
2227   return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2228 }
2229 
readArrayCookieImpl(CodeGenFunction & CGF,Address allocPtr,CharUnits cookieSize)2230 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2231                                                 Address allocPtr,
2232                                                 CharUnits cookieSize) {
2233   // The element size is right-justified in the cookie.
2234   Address numElementsPtr = allocPtr;
2235   CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2236   if (!numElementsOffset.isZero())
2237     numElementsPtr =
2238       CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2239 
2240   unsigned AS = allocPtr.getAddressSpace();
2241   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2242   if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2243     return CGF.Builder.CreateLoad(numElementsPtr);
2244   // In asan mode emit a function call instead of a regular load and let the
2245   // run-time deal with it: if the shadow is properly poisoned return the
2246   // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2247   // We can't simply ignore this load using nosanitize metadata because
2248   // the metadata may be lost.
2249   llvm::FunctionType *FTy =
2250       llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2251   llvm::FunctionCallee F =
2252       CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2253   return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2254 }
2255 
getArrayCookieSizeImpl(QualType elementType)2256 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2257   // ARM says that the cookie is always:
2258   //   struct array_cookie {
2259   //     std::size_t element_size; // element_size != 0
2260   //     std::size_t element_count;
2261   //   };
2262   // But the base ABI doesn't give anything an alignment greater than
2263   // 8, so we can dismiss this as typical ABI-author blindness to
2264   // actual language complexity and round up to the element alignment.
2265   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2266                   CGM.getContext().getTypeAlignInChars(elementType));
2267 }
2268 
InitializeArrayCookie(CodeGenFunction & CGF,Address newPtr,llvm::Value * numElements,const CXXNewExpr * expr,QualType elementType)2269 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2270                                          Address newPtr,
2271                                          llvm::Value *numElements,
2272                                          const CXXNewExpr *expr,
2273                                          QualType elementType) {
2274   assert(requiresArrayCookie(expr));
2275 
2276   // The cookie is always at the start of the buffer.
2277   Address cookie = newPtr;
2278 
2279   // The first element is the element size.
2280   cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2281   llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2282                  getContext().getTypeSizeInChars(elementType).getQuantity());
2283   CGF.Builder.CreateStore(elementSize, cookie);
2284 
2285   // The second element is the element count.
2286   cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2287   CGF.Builder.CreateStore(numElements, cookie);
2288 
2289   // Finally, compute a pointer to the actual data buffer by skipping
2290   // over the cookie completely.
2291   CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2292   return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2293 }
2294 
readArrayCookieImpl(CodeGenFunction & CGF,Address allocPtr,CharUnits cookieSize)2295 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2296                                             Address allocPtr,
2297                                             CharUnits cookieSize) {
2298   // The number of elements is at offset sizeof(size_t) relative to
2299   // the allocated pointer.
2300   Address numElementsPtr
2301     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2302 
2303   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2304   return CGF.Builder.CreateLoad(numElementsPtr);
2305 }
2306 
2307 /*********************** Static local initialization **************************/
2308 
getGuardAcquireFn(CodeGenModule & CGM,llvm::PointerType * GuardPtrTy)2309 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2310                                               llvm::PointerType *GuardPtrTy) {
2311   // int __cxa_guard_acquire(__guard *guard_object);
2312   llvm::FunctionType *FTy =
2313     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2314                             GuardPtrTy, /*isVarArg=*/false);
2315   return CGM.CreateRuntimeFunction(
2316       FTy, "__cxa_guard_acquire",
2317       llvm::AttributeList::get(CGM.getLLVMContext(),
2318                                llvm::AttributeList::FunctionIndex,
2319                                llvm::Attribute::NoUnwind));
2320 }
2321 
getGuardReleaseFn(CodeGenModule & CGM,llvm::PointerType * GuardPtrTy)2322 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2323                                               llvm::PointerType *GuardPtrTy) {
2324   // void __cxa_guard_release(__guard *guard_object);
2325   llvm::FunctionType *FTy =
2326     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2327   return CGM.CreateRuntimeFunction(
2328       FTy, "__cxa_guard_release",
2329       llvm::AttributeList::get(CGM.getLLVMContext(),
2330                                llvm::AttributeList::FunctionIndex,
2331                                llvm::Attribute::NoUnwind));
2332 }
2333 
getGuardAbortFn(CodeGenModule & CGM,llvm::PointerType * GuardPtrTy)2334 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2335                                             llvm::PointerType *GuardPtrTy) {
2336   // void __cxa_guard_abort(__guard *guard_object);
2337   llvm::FunctionType *FTy =
2338     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2339   return CGM.CreateRuntimeFunction(
2340       FTy, "__cxa_guard_abort",
2341       llvm::AttributeList::get(CGM.getLLVMContext(),
2342                                llvm::AttributeList::FunctionIndex,
2343                                llvm::Attribute::NoUnwind));
2344 }
2345 
2346 namespace {
2347   struct CallGuardAbort final : EHScopeStack::Cleanup {
2348     llvm::GlobalVariable *Guard;
CallGuardAbort__anon87ecd9350211::CallGuardAbort2349     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2350 
Emit__anon87ecd9350211::CallGuardAbort2351     void Emit(CodeGenFunction &CGF, Flags flags) override {
2352       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2353                                   Guard);
2354     }
2355   };
2356 }
2357 
2358 /// The ARM code here follows the Itanium code closely enough that we
2359 /// just special-case it at particular places.
2360 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2361                                     const VarDecl &D,
2362                                     llvm::GlobalVariable *var,
2363                                     bool shouldPerformInit) {
2364   CGBuilderTy &Builder = CGF.Builder;
2365 
2366   // Inline variables that weren't instantiated from variable templates have
2367   // partially-ordered initialization within their translation unit.
2368   bool NonTemplateInline =
2369       D.isInline() &&
2370       !isTemplateInstantiation(D.getTemplateSpecializationKind());
2371 
2372   // We only need to use thread-safe statics for local non-TLS variables and
2373   // inline variables; other global initialization is always single-threaded
2374   // or (through lazy dynamic loading in multiple threads) unsequenced.
2375   bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2376                     (D.isLocalVarDecl() || NonTemplateInline) &&
2377                     !D.getTLSKind();
2378 
2379   // If we have a global variable with internal linkage and thread-safe statics
2380   // are disabled, we can just let the guard variable be of type i8.
2381   bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2382 
2383   llvm::IntegerType *guardTy;
2384   CharUnits guardAlignment;
2385   if (useInt8GuardVariable) {
2386     guardTy = CGF.Int8Ty;
2387     guardAlignment = CharUnits::One();
2388   } else {
2389     // Guard variables are 64 bits in the generic ABI and size width on ARM
2390     // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2391     if (UseARMGuardVarABI) {
2392       guardTy = CGF.SizeTy;
2393       guardAlignment = CGF.getSizeAlign();
2394     } else {
2395       guardTy = CGF.Int64Ty;
2396       guardAlignment = CharUnits::fromQuantity(
2397                              CGM.getDataLayout().getABITypeAlignment(guardTy));
2398     }
2399   }
2400   llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
2401       CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2402 
2403   // Create the guard variable if we don't already have it (as we
2404   // might if we're double-emitting this function body).
2405   llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2406   if (!guard) {
2407     // Mangle the name for the guard.
2408     SmallString<256> guardName;
2409     {
2410       llvm::raw_svector_ostream out(guardName);
2411       getMangleContext().mangleStaticGuardVariable(&D, out);
2412     }
2413 
2414     // Create the guard variable with a zero-initializer.
2415     // Just absorb linkage and visibility from the guarded variable.
2416     guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2417                                      false, var->getLinkage(),
2418                                      llvm::ConstantInt::get(guardTy, 0),
2419                                      guardName.str());
2420     guard->setDSOLocal(var->isDSOLocal());
2421     guard->setVisibility(var->getVisibility());
2422     // If the variable is thread-local, so is its guard variable.
2423     guard->setThreadLocalMode(var->getThreadLocalMode());
2424     guard->setAlignment(guardAlignment.getAsAlign());
2425 
2426     // The ABI says: "It is suggested that it be emitted in the same COMDAT
2427     // group as the associated data object." In practice, this doesn't work for
2428     // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2429     llvm::Comdat *C = var->getComdat();
2430     if (!D.isLocalVarDecl() && C &&
2431         (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2432          CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2433       guard->setComdat(C);
2434       // An inline variable's guard function is run from the per-TU
2435       // initialization function, not via a dedicated global ctor function, so
2436       // we can't put it in a comdat.
2437       if (!NonTemplateInline)
2438         CGF.CurFn->setComdat(C);
2439     } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2440       guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2441     }
2442 
2443     CGM.setStaticLocalDeclGuardAddress(&D, guard);
2444   }
2445 
2446   Address guardAddr = Address(guard, guardAlignment);
2447 
2448   // Test whether the variable has completed initialization.
2449   //
2450   // Itanium C++ ABI 3.3.2:
2451   //   The following is pseudo-code showing how these functions can be used:
2452   //     if (obj_guard.first_byte == 0) {
2453   //       if ( __cxa_guard_acquire (&obj_guard) ) {
2454   //         try {
2455   //           ... initialize the object ...;
2456   //         } catch (...) {
2457   //            __cxa_guard_abort (&obj_guard);
2458   //            throw;
2459   //         }
2460   //         ... queue object destructor with __cxa_atexit() ...;
2461   //         __cxa_guard_release (&obj_guard);
2462   //       }
2463   //     }
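  //
  // For illustration (types and names here are arbitrary), a thread-safe
  // function-local static such as
  //
  //   struct Logger { Logger(); ~Logger(); };
  //   Logger &getLogger() {
  //     static Logger L;
  //     return L;
  //   }
  //
  // is lowered along that pattern: an acquire load of the guard's first byte,
  // a call to __cxa_guard_acquire, the Logger constructor, a __cxa_atexit
  // registration of ~Logger, and finally __cxa_guard_release.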
2464 
2465   // Load the first byte of the guard variable.
2466   llvm::LoadInst *LI =
2467       Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2468 
2469   // Itanium ABI:
2470   //   An implementation supporting thread-safety on multiprocessor
2471   //   systems must also guarantee that references to the initialized
2472   //   object do not occur before the load of the initialization flag.
2473   //
2474   // In LLVM, we do this by marking the load Acquire.
2475   if (threadsafe)
2476     LI->setAtomic(llvm::AtomicOrdering::Acquire);
2477 
2478   // For ARM, we should only check the first bit, rather than the entire byte:
2479   //
2480   // ARM C++ ABI 3.2.3.1:
2481   //   To support the potential use of initialization guard variables
2482   //   as semaphores that are the target of ARM SWP and LDREX/STREX
2483   //   synchronizing instructions we define a static initialization
2484   //   guard variable to be a 4-byte aligned, 4-byte word with the
2485   //   following inline access protocol.
2486   //     #define INITIALIZED 1
2487   //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
2488   //       if (__cxa_guard_acquire(&obj_guard))
2489   //         ...
2490   //     }
2491   //
2492   // and similarly for ARM64:
2493   //
2494   // ARM64 C++ ABI 3.2.2:
2495   //   This ABI instead only specifies the value bit 0 of the static guard
2496   //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
2497   //   variable is not initialized and 1 when it is.
2498   llvm::Value *V =
2499       (UseARMGuardVarABI && !useInt8GuardVariable)
2500           ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2501           : LI;
2502   llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2503 
2504   llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2505   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2506 
2507   // Check if the first byte of the guard variable is zero.
2508   CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2509                                CodeGenFunction::GuardKind::VariableGuard, &D);
2510 
2511   CGF.EmitBlock(InitCheckBlock);
2512 
2513   // Variables used when coping with thread-safe statics and exceptions.
2514   if (threadsafe) {
2515     // Call __cxa_guard_acquire.
2516     llvm::Value *V
2517       = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2518 
2519     llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2520 
2521     Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2522                          InitBlock, EndBlock);
2523 
2524     // Call __cxa_guard_abort along the exceptional edge.
2525     CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2526 
2527     CGF.EmitBlock(InitBlock);
2528   }
2529 
2530   // Emit the initializer and add a global destructor if appropriate.
2531   CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2532 
2533   if (threadsafe) {
2534     // Pop the guard-abort cleanup if we pushed one.
2535     CGF.PopCleanupBlock();
2536 
2537     // Call __cxa_guard_release.  This cannot throw.
2538     CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2539                                 guardAddr.getPointer());
2540   } else {
2541     // Store 1 into the first byte of the guard variable after initialization is
2542     // complete.
2543     Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2544                         Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2545   }
2546 
2547   CGF.EmitBlock(EndBlock);
2548 }
2549 
2550 /// Register a global destructor using __cxa_atexit.
2551 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2552                                         llvm::FunctionCallee dtor,
2553                                         llvm::Constant *addr, bool TLS) {
2554   assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2555          "__cxa_atexit is disabled");
2556   const char *Name = "__cxa_atexit";
2557   if (TLS) {
2558     const llvm::Triple &T = CGF.getTarget().getTriple();
2559     Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
2560   }
2561 
2562   // We're assuming that the destructor function is something we can
2563   // reasonably call with the default CC.  Go ahead and cast it to the
2564   // right prototype.
2565   llvm::Type *dtorTy =
2566     llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2567 
2568   // Preserve address space of addr.
2569   auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2570   auto AddrInt8PtrTy =
2571       AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2572 
2573   // Create a variable that binds the atexit to this shared object.
2574   llvm::Constant *handle =
2575       CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2576   auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2577   GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2578 
2579   // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2580   llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2581   llvm::FunctionType *atexitTy =
2582     llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2583 
2584   // Fetch the actual function.
2585   llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2586   if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2587     fn->setDoesNotThrow();
2588 
2589   if (!addr)
2590     // addr is null when we are trying to register a dtor annotated with
2591     // __attribute__((destructor)) in a constructor function. Using null here is
2592     // okay because this argument is just passed back to the destructor
2593     // function.
2594     addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2595 
2596   llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2597                              cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2598                          llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2599                          handle};
2600   CGF.EmitNounwindRuntimeCall(atexit, args);
2601 }
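
// As a rough source-level sketch (all names below are illustrative, not what
// the compiler actually emits), registering the destructor of a namespace-scope
// object `T t;` through the helper above behaves like:
//
//   extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
//   extern "C" char __dso_handle;
//   static void run_t_dtor(void *p) { static_cast<T *>(p)->~T(); }
//   // during static initialization:
//   __cxa_atexit(&run_t_dtor, &t, &__dso_handle);
//
// with _tlv_atexit (Darwin) or __cxa_thread_atexit substituted when the object
// is thread_local.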
2602 
2603 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2604                                                    StringRef FnName) {
2605   // Create a function that registers/unregisters destructors that have the same
2606   // priority.
2607   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2608   llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2609       FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2610 
2611   return GlobalInitOrCleanupFn;
2612 }
2613 
2614 static FunctionDecl *
2615 createGlobalInitOrCleanupFnDecl(CodeGen::CodeGenModule &CGM, StringRef FnName) {
2616   ASTContext &Ctx = CGM.getContext();
2617   QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, {});
2618   return FunctionDecl::Create(
2619       Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
2620       &Ctx.Idents.get(FnName), FunctionTy, nullptr, SC_Static, false, false);
2621 }
2622 
2623 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2624   for (const auto &I : DtorsUsingAtExit) {
2625     int Priority = I.first;
2626     std::string GlobalCleanupFnName =
2627         std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
2628 
2629     llvm::Function *GlobalCleanupFn =
2630         createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
2631 
2632     FunctionDecl *GlobalCleanupFD =
2633         createGlobalInitOrCleanupFnDecl(*this, GlobalCleanupFnName);
2634 
2635     CodeGenFunction CGF(*this);
2636     CGF.StartFunction(GlobalDecl(GlobalCleanupFD), getContext().VoidTy,
2637                       GlobalCleanupFn, getTypes().arrangeNullaryFunction(),
2638                       FunctionArgList(), SourceLocation(), SourceLocation());
2639 
2640     // Get the destructor function type, void(*)(void).
2641     llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
2642     llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
2643 
2644     // Destructor functions are run/unregistered in non-ascending
2645     // order of their priorities.
2646     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2647     auto itv = Dtors.rbegin();
2648     while (itv != Dtors.rend()) {
2649       llvm::Function *Dtor = *itv;
2650 
2651       // We're assuming that the destructor function is something we can
2652       // reasonably call with the correct CC.  Go ahead and cast it to the
2653       // right prototype.
2654       llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
2655       llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
2656       llvm::Value *NeedsDestruct =
2657           CGF.Builder.CreateIsNull(V, "needs_destruct");
2658 
2659       llvm::BasicBlock *DestructCallBlock =
2660           CGF.createBasicBlock("destruct.call");
2661       llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2662           (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2663       // Check if unatexit returns a value of 0. If it does, jump to
2664       // DestructCallBlock, otherwise jump to EndBlock directly.
2665       CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
2666 
2667       CGF.EmitBlock(DestructCallBlock);
2668 
2669       // Emit the call to casted Dtor.
2670       llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
2671       // Make sure the call and the callee agree on calling convention.
2672       CI->setCallingConv(Dtor->getCallingConv());
2673 
2674       CGF.EmitBlock(EndBlock);
2675 
2676       itv++;
2677     }
2678 
2679     CGF.FinishFunction();
2680     AddGlobalDtor(GlobalCleanupFn, Priority);
2681   }
2682 }
2683 
2684 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2685   for (const auto &I : DtorsUsingAtExit) {
2686     int Priority = I.first;
2687     std::string GlobalInitFnName =
2688         std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2689     llvm::Function *GlobalInitFn =
2690         createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
2691     FunctionDecl *GlobalInitFD =
2692         createGlobalInitOrCleanupFnDecl(*this, GlobalInitFnName);
2693 
2694     CodeGenFunction CGF(*this);
2695     CGF.StartFunction(GlobalDecl(GlobalInitFD), getContext().VoidTy,
2696                       GlobalInitFn, getTypes().arrangeNullaryFunction(),
2697                       FunctionArgList(), SourceLocation(), SourceLocation());
2698 
2699     // Since constructor functions are run in non-descending order of their
2700     // priorities, destructors are registered in non-descending order of their
2701     // priorities, and since destructor functions are run in the reverse order
2702     // of their registration, destructor functions are run in non-ascending
2703     // order of their priorities.
2704     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2705     for (auto *Dtor : Dtors) {
2706       // Register the destructor function calling __cxa_atexit if it is
2707       // available. Otherwise fall back on calling atexit.
2708       if (getCodeGenOpts().CXAAtExit) {
2709         emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2710       } else {
2711         // Get the destructor function type, void(*)(void).
2712         llvm::Type *dtorTy =
2713             llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
2714 
2715         // We're assuming that the destructor function is something we can
2716         // reasonably call with the correct CC.  Go ahead and cast it to the
2717         // right prototype.
2718         CGF.registerGlobalDtorWithAtExit(
2719             llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
2720       }
2721     }
2722 
2723     CGF.FinishFunction();
2724     AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2725   }
2726 
2727   if (getCXXABI().useSinitAndSterm())
2728     unregisterGlobalDtorsWithUnAtExit();
2729 }
2730 
2731 /// Register a global destructor as best as we know how.
2732 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2733                                        llvm::FunctionCallee dtor,
2734                                        llvm::Constant *addr) {
2735   if (D.isNoDestroy(CGM.getContext()))
2736     return;
2737 
2738   // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2739   // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2740   // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2741   // We can always use __cxa_thread_atexit.
2742   if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2743     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2744 
2745   // In Apple kexts, we want to add a global destructor entry.
2746   // FIXME: shouldn't this be guarded by some variable?
2747   if (CGM.getLangOpts().AppleKext) {
2748     // Generate a global destructor entry.
2749     return CGM.AddCXXDtorEntry(dtor, addr);
2750   }
2751 
2752   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2753 }
2754 
2755 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2756                                        CodeGen::CodeGenModule &CGM) {
2757   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2758   // Darwin prefers references to thread local variables to go through
2759   // the thread wrapper instead of directly referencing the backing variable.
2760   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2761          CGM.getTarget().getTriple().isOSDarwin();
2762 }
2763 
2764 /// Get the appropriate linkage for the wrapper function. This is essentially
2765 /// the weak form of the variable's linkage; every translation unit which needs
2766 /// the wrapper emits a copy, and we want the linker to merge them.
2767 static llvm::GlobalValue::LinkageTypes
2768 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2769   llvm::GlobalValue::LinkageTypes VarLinkage =
2770       CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2771 
2772   // For internal linkage variables, we don't need an external or weak wrapper.
2773   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2774     return VarLinkage;
2775 
2776   // If the thread wrapper is replaceable, give it appropriate linkage.
2777   if (isThreadWrapperReplaceable(VD, CGM))
2778     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2779         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2780       return VarLinkage;
2781   return llvm::GlobalValue::WeakODRLinkage;
2782 }
2783 
2784 llvm::Function *
2785 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2786                                              llvm::Value *Val) {
2787   // Mangle the name for the thread_local wrapper function.
2788   SmallString<256> WrapperName;
2789   {
2790     llvm::raw_svector_ostream Out(WrapperName);
2791     getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2792   }
2793 
2794   // FIXME: If VD is a definition, we should regenerate the function attributes
2795   // before returning.
2796   if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2797     return cast<llvm::Function>(V);
2798 
2799   QualType RetQT = VD->getType();
2800   if (RetQT->isReferenceType())
2801     RetQT = RetQT.getNonReferenceType();
2802 
2803   const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2804       getContext().getPointerType(RetQT), FunctionArgList());
2805 
2806   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2807   llvm::Function *Wrapper =
2808       llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2809                              WrapperName.str(), &CGM.getModule());
2810 
2811   if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2812     Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2813 
2814   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
2815 
2816   // Always resolve references to the wrapper at link time.
2817   if (!Wrapper->hasLocalLinkage())
2818     if (!isThreadWrapperReplaceable(VD, CGM) ||
2819         llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2820         llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2821         VD->getVisibility() == HiddenVisibility)
2822       Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2823 
2824   if (isThreadWrapperReplaceable(VD, CGM)) {
2825     Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2826     Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2827   }
2828 
2829   ThreadWrappers.push_back({VD, Wrapper});
2830   return Wrapper;
2831 }
2832 
2833 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2834     CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2835     ArrayRef<llvm::Function *> CXXThreadLocalInits,
2836     ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2837   llvm::Function *InitFunc = nullptr;
2838 
2839   // Separate initializers into those with ordered (or partially-ordered)
2840   // initialization and those with unordered initialization.
2841   llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2842   llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2843   for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2844     if (isTemplateInstantiation(
2845             CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2846       UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2847           CXXThreadLocalInits[I];
2848     else
2849       OrderedInits.push_back(CXXThreadLocalInits[I]);
2850   }
2851 
2852   if (!OrderedInits.empty()) {
2853     // Generate a guarded initialization function.
2854     llvm::FunctionType *FTy =
2855         llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2856     const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2857     InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
2858                                                      SourceLocation(),
2859                                                      /*TLS=*/true);
2860     llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2861         CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2862         llvm::GlobalVariable::InternalLinkage,
2863         llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2864     Guard->setThreadLocal(true);
2865     Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
2866 
2867     CharUnits GuardAlign = CharUnits::One();
2868     Guard->setAlignment(GuardAlign.getAsAlign());
2869 
2870     CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2871         InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
2872     // On Darwin platforms, use CXX_FAST_TLS calling convention.
2873     if (CGM.getTarget().getTriple().isOSDarwin()) {
2874       InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2875       InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2876     }
2877   }
2878 
2879   // Create declarations for thread wrappers for all thread-local variables
2880   // with non-discardable definitions in this translation unit.
2881   for (const VarDecl *VD : CXXThreadLocals) {
2882     if (VD->hasDefinition() &&
2883         !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2884       llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2885       getOrCreateThreadLocalWrapper(VD, GV);
2886     }
2887   }
2888 
2889   // Emit all referenced thread wrappers.
2890   for (auto VDAndWrapper : ThreadWrappers) {
2891     const VarDecl *VD = VDAndWrapper.first;
2892     llvm::GlobalVariable *Var =
2893         cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2894     llvm::Function *Wrapper = VDAndWrapper.second;
2895 
2896     // Some targets require that all accesses to thread local variables go through
2897     // the thread wrapper.  This means that we cannot attempt to create a thread
2898     // wrapper or a thread helper.
2899     if (!VD->hasDefinition()) {
2900       if (isThreadWrapperReplaceable(VD, CGM)) {
2901         Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2902         continue;
2903       }
2904 
2905       // If this isn't a TU in which this variable is defined, the thread
2906       // wrapper is discardable.
2907       if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2908         Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
2909     }
2910 
2911     CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2912 
2913     // Mangle the name for the thread_local initialization function.
2914     SmallString<256> InitFnName;
2915     {
2916       llvm::raw_svector_ostream Out(InitFnName);
2917       getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2918     }
2919 
2920     llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2921 
2922     // If we have a definition for the variable, emit the initialization
2923     // function as an alias to the global Init function (if any). Otherwise,
2924     // produce a declaration of the initialization function.
2925     llvm::GlobalValue *Init = nullptr;
2926     bool InitIsInitFunc = false;
2927     bool HasConstantInitialization = false;
2928     if (!usesThreadWrapperFunction(VD)) {
2929       HasConstantInitialization = true;
2930     } else if (VD->hasDefinition()) {
2931       InitIsInitFunc = true;
2932       llvm::Function *InitFuncToUse = InitFunc;
2933       if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2934         InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2935       if (InitFuncToUse)
2936         Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2937                                          InitFuncToUse);
2938     } else {
2939       // Emit a weak global function referring to the initialization function.
2940       // This function will not exist if the TU defining the thread_local
2941       // variable in question does not need any dynamic initialization for
2942       // its thread_local variables.
2943       Init = llvm::Function::Create(InitFnTy,
2944                                     llvm::GlobalVariable::ExternalWeakLinkage,
2945                                     InitFnName.str(), &CGM.getModule());
2946       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2947       CGM.SetLLVMFunctionAttributes(
2948           GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
2949     }
2950 
2951     if (Init) {
2952       Init->setVisibility(Var->getVisibility());
2953       // Don't mark an extern_weak function DSO local on windows.
2954       if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2955         Init->setDSOLocal(Var->isDSOLocal());
2956     }
2957 
2958     llvm::LLVMContext &Context = CGM.getModule().getContext();
2959     llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2960     CGBuilderTy Builder(CGM, Entry);
2961     if (HasConstantInitialization) {
2962       // No dynamic initialization to invoke.
2963     } else if (InitIsInitFunc) {
2964       if (Init) {
2965         llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
2966         if (isThreadWrapperReplaceable(VD, CGM)) {
2967           CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2968           llvm::Function *Fn =
2969               cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2970           Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2971         }
2972       }
2973     } else {
2974       // Don't know whether we have an init function. Call it if it exists.
2975       llvm::Value *Have = Builder.CreateIsNotNull(Init);
2976       llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2977       llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2978       Builder.CreateCondBr(Have, InitBB, ExitBB);
2979 
2980       Builder.SetInsertPoint(InitBB);
2981       Builder.CreateCall(InitFnTy, Init);
2982       Builder.CreateBr(ExitBB);
2983 
2984       Builder.SetInsertPoint(ExitBB);
2985     }
2986 
2987     // For a reference, the result of the wrapper function is a pointer to
2988     // the referenced object.
2989     llvm::Value *Val = Var;
2990     if (VD->getType()->isReferenceType()) {
2991       CharUnits Align = CGM.getContext().getDeclAlign(VD);
2992       Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
2993     }
2994     if (Val->getType() != Wrapper->getReturnType())
2995       Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2996           Val, Wrapper->getReturnType(), "");
2997     Builder.CreateRet(Val);
2998   }
2999 }
3000 
3001 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3002                                                    const VarDecl *VD,
3003                                                    QualType LValType) {
3004   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3005   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3006 
3007   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3008   CallVal->setCallingConv(Wrapper->getCallingConv());
3009 
3010   LValue LV;
3011   if (VD->getType()->isReferenceType())
3012     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3013   else
3014     LV = CGF.MakeAddrLValue(CallVal, LValType,
3015                             CGF.getContext().getDeclAlign(VD));
3016   // FIXME: need setObjCGCLValueClass?
3017   return LV;
3018 }
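
// For illustration (identifiers arbitrary): given
//
//   thread_local Widget w;          // defined in some other TU
//   Widget *use() { return &w; }
//
// the access in use() is emitted as a call to the thread wrapper (mangled
// _ZTW1w here), which runs any still-pending dynamic initialization and
// returns the address of w; the lvalue is then built from that returned
// pointer rather than from w directly.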
3019 
3020 /// Return whether the given global decl needs a VTT parameter, which it does
3021 /// if it's a base constructor or destructor with virtual bases.
3022 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3023   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3024 
3025   // We don't have any virtual bases, just return early.
3026   if (!MD->getParent()->getNumVBases())
3027     return false;
3028 
3029   // Check if we have a base constructor.
3030   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3031     return true;
3032 
3033   // Check if we have a base destructor.
3034   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3035     return true;
3036 
3037   return false;
3038 }
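
// For example (hypothetical classes): with
//
//   struct A { int a; };
//   struct B : virtual A { B(); };
//   struct C : B { C(); };
//
// the base-object variant of B::B() that C::C() invokes takes a VTT argument
// so it can install the vtable/virtual-base layout chosen by the complete
// object C, whereas the complete-object variant of B::B() does not.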
3039 
3040 namespace {
3041 class ItaniumRTTIBuilder {
3042   CodeGenModule &CGM;  // Per-module state.
3043   llvm::LLVMContext &VMContext;
3044   const ItaniumCXXABI &CXXABI;  // Per-module state.
3045 
3046   /// Fields - The fields of the RTTI descriptor currently being built.
3047   SmallVector<llvm::Constant *, 16> Fields;
3048 
3049   /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3050   llvm::GlobalVariable *
3051   GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3052 
3053   /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3054   /// descriptor of the given type.
3055   llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3056 
3057   /// BuildVTablePointer - Build the vtable pointer for the given type.
3058   void BuildVTablePointer(const Type *Ty);
3059 
3060   /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3061   /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3062   void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3063 
3064   /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3065   /// classes with bases that do not satisfy the abi::__si_class_type_info
3066   /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3067   void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3068 
3069   /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3070   /// for pointer types.
3071   void BuildPointerTypeInfo(QualType PointeeTy);
3072 
3073   /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3074   /// type_info for an object type.
3075   void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3076 
3077   /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3078   /// struct, used for member pointer types.
3079   void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3080 
3081 public:
3082   ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3083       : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3084 
3085   // Pointer type info flags.
3086   enum {
3087     /// PTI_Const - Type has const qualifier.
3088     PTI_Const = 0x1,
3089 
3090     /// PTI_Volatile - Type has volatile qualifier.
3091     PTI_Volatile = 0x2,
3092 
3093     /// PTI_Restrict - Type has restrict qualifier.
3094     PTI_Restrict = 0x4,
3095 
3096     /// PTI_Incomplete - Type is incomplete.
3097     PTI_Incomplete = 0x8,
3098 
3099     /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3100     /// (in pointer to member).
3101     PTI_ContainingClassIncomplete = 0x10,
3102 
3103     /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3104     //PTI_TransactionSafe = 0x20,
3105 
3106     /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3107     PTI_Noexcept = 0x40,
3108   };
3109 
3110   // VMI type info flags.
3111   enum {
3112     /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3113     VMI_NonDiamondRepeat = 0x1,
3114 
3115     /// VMI_DiamondShaped - Class is diamond shaped.
3116     VMI_DiamondShaped = 0x2
3117   };
3118 
3119   // Base class type info flags.
3120   enum {
3121     /// BCTI_Virtual - Base class is virtual.
3122     BCTI_Virtual = 0x1,
3123 
3124     /// BCTI_Public - Base class is public.
3125     BCTI_Public = 0x2
3126   };
3127 
3128   /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3129   /// link to an existing RTTI descriptor if one already exists.
3130   llvm::Constant *BuildTypeInfo(QualType Ty);
3131 
3132   /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3133   llvm::Constant *BuildTypeInfo(
3134       QualType Ty,
3135       llvm::GlobalVariable::LinkageTypes Linkage,
3136       llvm::GlobalValue::VisibilityTypes Visibility,
3137       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3138 };
3139 }
3140 
3141 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3142     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3143   SmallString<256> Name;
3144   llvm::raw_svector_ostream Out(Name);
3145   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3146 
3147   // We know that the mangled name of the type starts at index 4 of the
3148   // mangled name of the typename, so we can just index into it in order to
3149   // get the mangled name of the type.
3150   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3151                                                             Name.substr(4));
3152   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3153 
3154   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3155       Name, Init->getType(), Linkage, Align.getQuantity());
3156 
3157   GV->setInitializer(Init);
3158 
3159   return GV;
3160 }
3161 
3162 llvm::Constant *
3163 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3164   // Mangle the RTTI name.
3165   SmallString<256> Name;
3166   llvm::raw_svector_ostream Out(Name);
3167   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3168 
3169   // Look for an existing global.
3170   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3171 
3172   if (!GV) {
3173     // Create a new global variable.
3174     // Note for the future: If we would ever like to do deferred emission of
3175     // RTTI, check if emitting vtables opportunistically needs any adjustment.
3176 
3177     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3178                                   /*isConstant=*/true,
3179                                   llvm::GlobalValue::ExternalLinkage, nullptr,
3180                                   Name);
3181     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3182     CGM.setGVProperties(GV, RD);
3183     // Import the typeinfo symbol when all non-inline virtual methods are
3184     // imported.
3185     if (CGM.getTarget().hasPS4DLLImportExport()) {
3186       if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3187         GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3188         CGM.setDSOLocal(GV);
3189       }
3190     }
3191   }
3192 
3193   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3194 }
3195 
3196 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3197 /// info for that type is defined in the standard library.
3198 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3199   // Itanium C++ ABI 2.9.2:
3200   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
3201   //   the run-time support library. Specifically, the run-time support
3202   //   library should contain type_info objects for the types X, X* and
3203   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3204   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
3205   //   long, unsigned long, long long, unsigned long long, float, double,
3206   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
3207   //   half-precision floating point types.
3208   //
3209   // GCC also emits RTTI for __int128.
3210   // FIXME: We do not emit RTTI information for decimal types here.
3211 
3212   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3213   switch (Ty->getKind()) {
3214     case BuiltinType::Void:
3215     case BuiltinType::NullPtr:
3216     case BuiltinType::Bool:
3217     case BuiltinType::WChar_S:
3218     case BuiltinType::WChar_U:
3219     case BuiltinType::Char_U:
3220     case BuiltinType::Char_S:
3221     case BuiltinType::UChar:
3222     case BuiltinType::SChar:
3223     case BuiltinType::Short:
3224     case BuiltinType::UShort:
3225     case BuiltinType::Int:
3226     case BuiltinType::UInt:
3227     case BuiltinType::Long:
3228     case BuiltinType::ULong:
3229     case BuiltinType::LongLong:
3230     case BuiltinType::ULongLong:
3231     case BuiltinType::Half:
3232     case BuiltinType::Float:
3233     case BuiltinType::Double:
3234     case BuiltinType::LongDouble:
3235     case BuiltinType::Float16:
3236     case BuiltinType::Float128:
3237     case BuiltinType::Char8:
3238     case BuiltinType::Char16:
3239     case BuiltinType::Char32:
3240     case BuiltinType::Int128:
3241     case BuiltinType::UInt128:
3242       return true;
3243 
3244 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3245     case BuiltinType::Id:
3246 #include "clang/Basic/OpenCLImageTypes.def"
3247 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3248     case BuiltinType::Id:
3249 #include "clang/Basic/OpenCLExtensionTypes.def"
3250     case BuiltinType::OCLSampler:
3251     case BuiltinType::OCLEvent:
3252     case BuiltinType::OCLClkEvent:
3253     case BuiltinType::OCLQueue:
3254     case BuiltinType::OCLReserveID:
3255 #define SVE_TYPE(Name, Id, SingletonId) \
3256     case BuiltinType::Id:
3257 #include "clang/Basic/AArch64SVEACLETypes.def"
3258 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3259     case BuiltinType::Id:
3260 #include "clang/Basic/PPCTypes.def"
3261 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3262 #include "clang/Basic/RISCVVTypes.def"
3263     case BuiltinType::ShortAccum:
3264     case BuiltinType::Accum:
3265     case BuiltinType::LongAccum:
3266     case BuiltinType::UShortAccum:
3267     case BuiltinType::UAccum:
3268     case BuiltinType::ULongAccum:
3269     case BuiltinType::ShortFract:
3270     case BuiltinType::Fract:
3271     case BuiltinType::LongFract:
3272     case BuiltinType::UShortFract:
3273     case BuiltinType::UFract:
3274     case BuiltinType::ULongFract:
3275     case BuiltinType::SatShortAccum:
3276     case BuiltinType::SatAccum:
3277     case BuiltinType::SatLongAccum:
3278     case BuiltinType::SatUShortAccum:
3279     case BuiltinType::SatUAccum:
3280     case BuiltinType::SatULongAccum:
3281     case BuiltinType::SatShortFract:
3282     case BuiltinType::SatFract:
3283     case BuiltinType::SatLongFract:
3284     case BuiltinType::SatUShortFract:
3285     case BuiltinType::SatUFract:
3286     case BuiltinType::SatULongFract:
3287     case BuiltinType::BFloat16:
3288       return false;
3289 
3290     case BuiltinType::Dependent:
3291 #define BUILTIN_TYPE(Id, SingletonId)
3292 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3293     case BuiltinType::Id:
3294 #include "clang/AST/BuiltinTypes.def"
3295       llvm_unreachable("asking for RTTI for a placeholder type!");
3296 
3297     case BuiltinType::ObjCId:
3298     case BuiltinType::ObjCClass:
3299     case BuiltinType::ObjCSel:
3300       llvm_unreachable("FIXME: Objective-C types are unsupported!");
3301   }
3302 
3303   llvm_unreachable("Invalid BuiltinType Kind!");
3304 }
3305 
3306 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3307   QualType PointeeTy = PointerTy->getPointeeType();
3308   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3309   if (!BuiltinTy)
3310     return false;
3311 
3312   // Check the qualifiers.
3313   Qualifiers Quals = PointeeTy.getQualifiers();
3314   Quals.removeConst();
3315 
3316   if (!Quals.empty())
3317     return false;
3318 
3319   return TypeInfoIsInStandardLibrary(BuiltinTy);
3320 }
3321 
3322 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3323 /// information for the given type exists in the standard library.
3324 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3325   // Type info for builtin types is defined in the standard library.
3326   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3327     return TypeInfoIsInStandardLibrary(BuiltinTy);
3328 
3329   // Type info for some pointer types to builtin types is defined in the
3330   // standard library.
3331   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3332     return TypeInfoIsInStandardLibrary(PointerTy);
3333 
3334   return false;
3335 }
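
// For example, `typeid(int)`, `typeid(int *)` and `typeid(const int *)` all
// name type_info objects that the C++ runtime library is required to provide,
// so no descriptor needs to be emitted for them here.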
3336 
3337 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3338 /// the given type exists somewhere else, and that we should not emit the type
3339 /// information in this translation unit.  Assumes that it is not a
3340 /// standard-library type.
3341 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3342                                             QualType Ty) {
3343   ASTContext &Context = CGM.getContext();
3344 
3345   // If RTTI is disabled, assume it might be disabled in the
3346   // translation unit that defines any potential key function, too.
3347   if (!Context.getLangOpts().RTTI) return false;
3348 
3349   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3350     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3351     if (!RD->hasDefinition())
3352       return false;
3353 
3354     if (!RD->isDynamicClass())
3355       return false;
3356 
3357     // FIXME: this may need to be reconsidered if the key function
3358     // changes.
3359     // N.B. We must always emit the RTTI data ourselves if there exists a key
3360     // function.
3361     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3362 
3363     // Don't import the RTTI but emit it locally.
3364     if (CGM.getTriple().isWindowsGNUEnvironment())
3365       return false;
3366 
3367     if (CGM.getVTables().isVTableExternal(RD)) {
3368       if (CGM.getTarget().hasPS4DLLImportExport())
3369         return true;
3370 
3371       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3372                  ? false
3373                  : true;
3374     }
3375     if (IsDLLImport)
3376       return true;
3377   }
3378 
3379   return false;
3380 }
3381 
3382 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3383 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3384   return !RecordTy->getDecl()->isCompleteDefinition();
3385 }
3386 
3387 /// ContainsIncompleteClassType - Returns whether the given type contains an
3388 /// incomplete class type. This is true if
3389 ///
3390 ///   * The given type is an incomplete class type.
3391 ///   * The given type is a pointer type whose pointee type contains an
3392 ///     incomplete class type.
3393 ///   * The given type is a member pointer type whose class is an incomplete
3394 ///     class type.
3395 ///   * The given type is a member pointer type whose pointee type contains an
3396 ///     incomplete class type.
3397 /// That is, whether the type is, or directly or indirectly points to, an incomplete class type.
3398 static bool ContainsIncompleteClassType(QualType Ty) {
3399   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3400     if (IsIncompleteClassType(RecordTy))
3401       return true;
3402   }
3403 
3404   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3405     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3406 
3407   if (const MemberPointerType *MemberPointerTy =
3408       dyn_cast<MemberPointerType>(Ty)) {
3409     // Check if the class type is incomplete.
3410     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3411     if (IsIncompleteClassType(ClassType))
3412       return true;
3413 
3414     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3415   }
3416 
3417   return false;
3418 }
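
// For example (hypothetical): given only the declaration `struct Incomplete;`,
// both `Incomplete **` and the member-pointer type `int Incomplete::*` contain
// an incomplete class type, so their RTTI must stay distinct from whatever is
// later emitted for the completed type (see getTypeInfoLinkage below).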
3419 
3420 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3421 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3422 // iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
3423 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3424   // Check the number of bases.
3425   if (RD->getNumBases() != 1)
3426     return false;
3427 
3428   // Get the base.
3429   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3430 
3431   // Check that the base is not virtual.
3432   if (Base->isVirtual())
3433     return false;
3434 
3435   // Check that the base is public.
3436   if (Base->getAccessSpecifier() != AS_public)
3437     return false;
3438 
3439   // Check that the class is dynamic iff the base is.
3440   auto *BaseDecl =
3441       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3442   if (!BaseDecl->isEmpty() &&
3443       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3444     return false;
3445 
3446   return true;
3447 }
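
// For example (names arbitrary):
//
//   struct Base { virtual ~Base(); };
//   struct Single : Base { };              // abi::__si_class_type_info
//   struct Other { virtual ~Other(); };
//   struct Multi : Base, Other { };        // abi::__vmi_class_type_info
//
// Single satisfies the constraint above, while Multi does not and falls back
// to abi::__vmi_class_type_info.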
3448 
3449 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3450   // abi::__class_type_info.
3451   static const char * const ClassTypeInfo =
3452     "_ZTVN10__cxxabiv117__class_type_infoE";
3453   // abi::__si_class_type_info.
3454   static const char * const SIClassTypeInfo =
3455     "_ZTVN10__cxxabiv120__si_class_type_infoE";
3456   // abi::__vmi_class_type_info.
3457   static const char * const VMIClassTypeInfo =
3458     "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3459 
3460   const char *VTableName = nullptr;
3461 
3462   switch (Ty->getTypeClass()) {
3463 #define TYPE(Class, Base)
3464 #define ABSTRACT_TYPE(Class, Base)
3465 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3466 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3467 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3468 #include "clang/AST/TypeNodes.inc"
3469     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3470 
3471   case Type::LValueReference:
3472   case Type::RValueReference:
3473     llvm_unreachable("References shouldn't get here");
3474 
3475   case Type::Auto:
3476   case Type::DeducedTemplateSpecialization:
3477     llvm_unreachable("Undeduced type shouldn't get here");
3478 
3479   case Type::Pipe:
3480     llvm_unreachable("Pipe types shouldn't get here");
3481 
3482   case Type::Builtin:
3483   case Type::ExtInt:
3484   // GCC treats vector and complex types as fundamental types.
3485   case Type::Vector:
3486   case Type::ExtVector:
3487   case Type::ConstantMatrix:
3488   case Type::Complex:
3489   case Type::Atomic:
3490   // FIXME: GCC treats block pointers as fundamental types?!
3491   case Type::BlockPointer:
3492     // abi::__fundamental_type_info.
3493     VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3494     break;
3495 
3496   case Type::ConstantArray:
3497   case Type::IncompleteArray:
3498   case Type::VariableArray:
3499     // abi::__array_type_info.
3500     VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3501     break;
3502 
3503   case Type::FunctionNoProto:
3504   case Type::FunctionProto:
3505     // abi::__function_type_info.
3506     VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3507     break;
3508 
3509   case Type::Enum:
3510     // abi::__enum_type_info.
3511     VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3512     break;
3513 
3514   case Type::Record: {
3515     const CXXRecordDecl *RD =
3516       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3517 
3518     if (!RD->hasDefinition() || !RD->getNumBases()) {
3519       VTableName = ClassTypeInfo;
3520     } else if (CanUseSingleInheritance(RD)) {
3521       VTableName = SIClassTypeInfo;
3522     } else {
3523       VTableName = VMIClassTypeInfo;
3524     }
3525 
3526     break;
3527   }
3528 
3529   case Type::ObjCObject:
3530     // Ignore protocol qualifiers.
3531     Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3532 
3533     // Handle id and Class.
3534     if (isa<BuiltinType>(Ty)) {
3535       VTableName = ClassTypeInfo;
3536       break;
3537     }
3538 
3539     assert(isa<ObjCInterfaceType>(Ty));
3540     LLVM_FALLTHROUGH;
3541 
3542   case Type::ObjCInterface:
3543     if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3544       VTableName = SIClassTypeInfo;
3545     } else {
3546       VTableName = ClassTypeInfo;
3547     }
3548     break;
3549 
3550   case Type::ObjCObjectPointer:
3551   case Type::Pointer:
3552     // abi::__pointer_type_info.
3553     VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3554     break;
3555 
3556   case Type::MemberPointer:
3557     // abi::__pointer_to_member_type_info.
3558     VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3559     break;
3560   }
3561 
3562   llvm::Constant *VTable = nullptr;
3563 
3564   // Check if the alias exists. If it doesn't, then get or create the global.
3565   if (CGM.getItaniumVTableContext().isRelativeLayout())
3566     VTable = CGM.getModule().getNamedAlias(VTableName);
3567   if (!VTable)
3568     VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3569 
3570   CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3571 
3572   llvm::Type *PtrDiffTy =
3573       CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3574 
3575   // The vtable address point is 2.
3576   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3577     // The vtable address point is 8 bytes after its start:
3578     // 4 for the offset to top + 4 for the relative offset to rtti.
3579     llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3580     VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3581     VTable =
3582         llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3583   } else {
3584     llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3585     VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
3586                                                           Two);
3587   }
3588   VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3589 
3590   Fields.push_back(VTable);
3591 }
3592 
3593 /// Return the linkage that the type info and type info name constants
3594 /// should have for the given type.
3595 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3596                                                              QualType Ty) {
3597   // Itanium C++ ABI 2.9.5p7:
3598   //   In addition, it and all of the intermediate abi::__pointer_type_info
3599   //   structs in the chain down to the abi::__class_type_info for the
3600   //   incomplete class type must be prevented from resolving to the
3601   //   corresponding type_info structs for the complete class type, possibly
3602   //   by making them local static objects. Finally, a dummy class RTTI is
3603   //   generated for the incomplete type that will not resolve to the final
3604   //   complete class RTTI (because the latter need not exist), possibly by
3605   //   making it a local static object.
3606   if (ContainsIncompleteClassType(Ty))
3607     return llvm::GlobalValue::InternalLinkage;
3608 
3609   switch (Ty->getLinkage()) {
3610   case NoLinkage:
3611   case InternalLinkage:
3612   case UniqueExternalLinkage:
3613     return llvm::GlobalValue::InternalLinkage;
3614 
3615   case VisibleNoLinkage:
3616   case ModuleInternalLinkage:
3617   case ModuleLinkage:
3618   case ExternalLinkage:
3619     // RTTI is not enabled, which means that this type info struct is going
3620     // to be used for exception handling. Give it linkonce_odr linkage.
3621     if (!CGM.getLangOpts().RTTI)
3622       return llvm::GlobalValue::LinkOnceODRLinkage;
3623 
3624     if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3625       const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3626       if (RD->hasAttr<WeakAttr>())
3627         return llvm::GlobalValue::WeakODRLinkage;
3628       if (CGM.getTriple().isWindowsItaniumEnvironment())
3629         if (RD->hasAttr<DLLImportAttr>() &&
3630             ShouldUseExternalRTTIDescriptor(CGM, Ty))
3631           return llvm::GlobalValue::ExternalLinkage;
3632       // MinGW always uses LinkOnceODRLinkage for type info.
3633       if (RD->isDynamicClass() &&
3634           !CGM.getContext()
3635                .getTargetInfo()
3636                .getTriple()
3637                .isWindowsGNUEnvironment())
3638         return CGM.getVTableLinkage(RD);
3639     }
3640 
3641     return llvm::GlobalValue::LinkOnceODRLinkage;
3642   }
3643 
3644   llvm_unreachable("Invalid linkage!");
3645 }
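
// A few representative outcomes of getTypeInfoLinkage (a sketch, assuming a
// non-Windows-GNU target with RTTI enabled):
//
//   struct Incomplete;                 // type_info for 'Incomplete *'
//                                      //   -> internal (incomplete class)
//   namespace { struct Local {}; }     // internal-linkage type
//                                      //   -> internal
//   struct Dyn { virtual void f(); };  // dynamic class with external linkage
//                                      //   -> follows getVTableLinkage(RD)
//   using FnPtr = int (*)();           // non-record type, external linkage
//                                      //   -> linkonce_odr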
3646 
BuildTypeInfo(QualType Ty)3647 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3648   // We want to operate on the canonical type.
3649   Ty = Ty.getCanonicalType();
3650 
3651   // Check if we've already emitted an RTTI descriptor for this type.
3652   SmallString<256> Name;
3653   llvm::raw_svector_ostream Out(Name);
3654   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3655 
3656   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3657   if (OldGV && !OldGV->isDeclaration()) {
3658     assert(!OldGV->hasAvailableExternallyLinkage() &&
3659            "available_externally typeinfos not yet implemented");
3660 
3661     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3662   }
3663 
3664   // Check if there is already an external RTTI descriptor for this type.
3665   if (IsStandardLibraryRTTIDescriptor(Ty) ||
3666       ShouldUseExternalRTTIDescriptor(CGM, Ty))
3667     return GetAddrOfExternalRTTIDescriptor(Ty);
3668 
3669   // Compute the linkage with which to emit this RTTI descriptor and its name.
3670   llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3671 
3672   // Give the type_info object and name the formal visibility of the
3673   // type itself.
3674   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3675   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3676     // If the linkage is local, only default visibility makes sense.
3677     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3678   else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3679            ItaniumCXXABI::RUK_NonUniqueHidden)
3680     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3681   else
3682     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3683 
3684   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3685       llvm::GlobalValue::DefaultStorageClass;
3686   if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3687     auto RD = Ty->getAsCXXRecordDecl();
3688     if (RD && RD->hasAttr<DLLExportAttr>())
3689       DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3690   }
3691 
3692   return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3693 }
3694 
BuildTypeInfo(QualType Ty,llvm::GlobalVariable::LinkageTypes Linkage,llvm::GlobalValue::VisibilityTypes Visibility,llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass)3695 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3696       QualType Ty,
3697       llvm::GlobalVariable::LinkageTypes Linkage,
3698       llvm::GlobalValue::VisibilityTypes Visibility,
3699       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3700   // Add the vtable pointer.
3701   BuildVTablePointer(cast<Type>(Ty));
3702 
3703   // And the name.
3704   llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3705   llvm::Constant *TypeNameField;
3706 
3707   // If we're supposed to demote the visibility, be sure to set a flag
3708   // to use a string comparison for type_info comparisons.
3709   ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3710       CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3711   if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3712     // The flag is the sign bit, which on ARM64 is defined to be clear
3713     // for global pointers.  This is very ARM64-specific.
3714     TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3715     llvm::Constant *flag =
3716         llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3717     TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3718     TypeNameField =
3719         llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3720   } else {
3721     TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3722   }
3723   Fields.push_back(TypeNameField);
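
  // A minimal sketch of how a runtime can consume that bit (hypothetical
  // helper, not this file's or libc++abi's actual code): if the high bit is
  // set on either side, equality falls back to comparing the mangled names
  // with the flag masked off.
  //
  //   constexpr uint64_t NonUniqueBit = uint64_t(1) << 63;
  //   static bool sameType(uint64_t NameA, uint64_t NameB) {
  //     if (!((NameA | NameB) & NonUniqueBit))
  //       return NameA == NameB;                    // unique: pointer identity
  //     auto *A = reinterpret_cast<const char *>(NameA & ~NonUniqueBit);
  //     auto *B = reinterpret_cast<const char *>(NameB & ~NonUniqueBit);
  //     return strcmp(A, B) == 0;                   // non-unique: compare names
  //   }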
3724 
3725   switch (Ty->getTypeClass()) {
3726 #define TYPE(Class, Base)
3727 #define ABSTRACT_TYPE(Class, Base)
3728 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3729 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3730 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3731 #include "clang/AST/TypeNodes.inc"
3732     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3733 
3734   // GCC treats vector types as fundamental types.
3735   case Type::Builtin:
3736   case Type::Vector:
3737   case Type::ExtVector:
3738   case Type::ConstantMatrix:
3739   case Type::Complex:
3740   case Type::BlockPointer:
3741     // Itanium C++ ABI 2.9.5p4:
3742     // abi::__fundamental_type_info adds no data members to std::type_info.
3743     break;
3744 
3745   case Type::LValueReference:
3746   case Type::RValueReference:
3747     llvm_unreachable("References shouldn't get here");
3748 
3749   case Type::Auto:
3750   case Type::DeducedTemplateSpecialization:
3751     llvm_unreachable("Undeduced type shouldn't get here");
3752 
3753   case Type::Pipe:
3754     break;
3755 
3756   case Type::ExtInt:
3757     break;
3758 
3759   case Type::ConstantArray:
3760   case Type::IncompleteArray:
3761   case Type::VariableArray:
3762     // Itanium C++ ABI 2.9.5p5:
3763     // abi::__array_type_info adds no data members to std::type_info.
3764     break;
3765 
3766   case Type::FunctionNoProto:
3767   case Type::FunctionProto:
3768     // Itanium C++ ABI 2.9.5p5:
3769     // abi::__function_type_info adds no data members to std::type_info.
3770     break;
3771 
3772   case Type::Enum:
3773     // Itanium C++ ABI 2.9.5p5:
3774     // abi::__enum_type_info adds no data members to std::type_info.
3775     break;
3776 
3777   case Type::Record: {
3778     const CXXRecordDecl *RD =
3779       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3780     if (!RD->hasDefinition() || !RD->getNumBases()) {
3781       // We don't need to emit any fields.
3782       break;
3783     }
3784 
3785     if (CanUseSingleInheritance(RD))
3786       BuildSIClassTypeInfo(RD);
3787     else
3788       BuildVMIClassTypeInfo(RD);
3789 
3790     break;
3791   }
3792 
3793   case Type::ObjCObject:
3794   case Type::ObjCInterface:
3795     BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3796     break;
3797 
3798   case Type::ObjCObjectPointer:
3799     BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3800     break;
3801 
3802   case Type::Pointer:
3803     BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3804     break;
3805 
3806   case Type::MemberPointer:
3807     BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3808     break;
3809 
3810   case Type::Atomic:
3811     // No fields, at least for the moment.
3812     break;
3813   }
3814 
3815   llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3816 
3817   SmallString<256> Name;
3818   llvm::raw_svector_ostream Out(Name);
3819   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3820   llvm::Module &M = CGM.getModule();
3821   llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3822   llvm::GlobalVariable *GV =
3823       new llvm::GlobalVariable(M, Init->getType(),
3824                                /*isConstant=*/true, Linkage, Init, Name);
3825 
3826   // Export the typeinfo in the same circumstances as the vtable is exported.
3827   auto GVDLLStorageClass = DLLStorageClass;
3828   if (CGM.getTarget().hasPS4DLLImportExport()) {
3829     if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3830       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3831       if (RD->hasAttr<DLLExportAttr>() ||
3832           CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
3833         GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
3834       }
3835     }
3836   }
3837 
3838   // If there's already an old global variable, replace it with the new one.
3839   if (OldGV) {
3840     GV->takeName(OldGV);
3841     llvm::Constant *NewPtr =
3842       llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3843     OldGV->replaceAllUsesWith(NewPtr);
3844     OldGV->eraseFromParent();
3845   }
3846 
3847   if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3848     GV->setComdat(M.getOrInsertComdat(GV->getName()));
3849 
3850   CharUnits Align =
3851       CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3852   GV->setAlignment(Align.getAsAlign());
3853 
3854   // The Itanium ABI specifies that type_info objects must be globally
3855   // unique, with one exception: if the type is an incomplete class
3856   // type or a (possibly indirect) pointer to one.  That exception
3857   // affects the general case of comparing type_info objects produced
3858   // by the typeid operator, which is why the comparison operators on
3859   // std::type_info generally use the type_info name pointers instead
3860   // of the object addresses.  However, the language's built-in uses
3861   // of RTTI generally require class types to be complete, even when
3862   // manipulating pointers to those class types.  This allows the
3863   // implementation of dynamic_cast to rely on address equality tests,
3864   // which is much faster.
3865 
3866   // All of this is to say that it's important that both the type_info
3867   // object and the type_info name be uniqued when weakly emitted.
3868 
3869   TypeName->setVisibility(Visibility);
3870   CGM.setDSOLocal(TypeName);
3871 
3872   GV->setVisibility(Visibility);
3873   CGM.setDSOLocal(GV);
3874 
3875   TypeName->setDLLStorageClass(DLLStorageClass);
3876   GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3877                              ? GVDLLStorageClass
3878                              : DLLStorageClass);
3879 
3880   TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3881   GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3882 
3883   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3884 }
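
// Illustrative sketch of the finished constant for a base class with a virtual
// method, e.g. 'struct A { virtual void f(); };' (typed-pointer IR; the actual
// linkage, visibility, comdat and DLL storage come from the code above):
//
//   @_ZTS1A = constant [3 x i8] c"1A\00"
//   @_ZTI1A = constant { i8*, i8* } {
//     i8* bitcast (i8** getelementptr inbounds
//           (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*),
//     i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1A, i32 0, i32 0)
//   }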
3885 
3886 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3887 /// for the given Objective-C object type.
BuildObjCObjectTypeInfo(const ObjCObjectType * OT)3888 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3889   // Drop qualifiers.
3890   const Type *T = OT->getBaseType().getTypePtr();
3891   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3892 
3893   // The builtin types are abi::__class_type_infos and don't require
3894   // extra fields.
3895   if (isa<BuiltinType>(T)) return;
3896 
3897   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3898   ObjCInterfaceDecl *Super = Class->getSuperClass();
3899 
3900   // Root classes are also __class_type_info.
3901   if (!Super) return;
3902 
3903   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3904 
3905   // Everything else is single inheritance.
3906   llvm::Constant *BaseTypeInfo =
3907       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3908   Fields.push_back(BaseTypeInfo);
3909 }
3910 
3911 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3912 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
BuildSIClassTypeInfo(const CXXRecordDecl * RD)3913 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3914   // Itanium C++ ABI 2.9.5p6b:
3915   // It adds to abi::__class_type_info a single member pointing to the
3916   // type_info structure for the base type,
3917   llvm::Constant *BaseTypeInfo =
3918     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3919   Fields.push_back(BaseTypeInfo);
3920 }
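
// For illustration: given
//   struct Base { virtual ~Base(); };
//   struct Derived : Base {};          // single public non-virtual base
// the descriptor for Derived is an abi::__si_class_type_info whose only extra
// field is a pointer to @_ZTI4Base, so Fields ends up as
// { __si_class_type_info vtable slot, @_ZTS7Derived, @_ZTI4Base }.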
3921 
3922 namespace {
3923   /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3924   /// a class hierarchy.
3925   struct SeenBases {
3926     llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3927     llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3928   };
3929 }
3930 
3931 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3932 /// abi::__vmi_class_type_info.
3933 ///
ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier * Base,SeenBases & Bases)3934 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3935                                              SeenBases &Bases) {
3936 
3937   unsigned Flags = 0;
3938 
3939   auto *BaseDecl =
3940       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3941 
3942   if (Base->isVirtual()) {
3943     // Mark the virtual base as seen.
3944     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3945       // If this virtual base has been seen before, then the class is diamond
3946       // shaped.
3947       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3948     } else {
3949       if (Bases.NonVirtualBases.count(BaseDecl))
3950         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3951     }
3952   } else {
3953     // Mark the non-virtual base as seen.
3954     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3955       // If this non-virtual base has been seen before, then the class has non-
3956       // diamond shaped repeated inheritance.
3957       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3958     } else {
3959       if (Bases.VirtualBases.count(BaseDecl))
3960         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3961     }
3962   }
3963 
3964   // Walk all bases.
3965   for (const auto &I : BaseDecl->bases())
3966     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3967 
3968   return Flags;
3969 }
3970 
ComputeVMIClassTypeInfoFlags(const CXXRecordDecl * RD)3971 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3972   unsigned Flags = 0;
3973   SeenBases Bases;
3974 
3975   // Walk all bases.
3976   for (const auto &I : RD->bases())
3977     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3978 
3979   return Flags;
3980 }
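
// Worked example (a sketch): for the classic diamond
//   struct A { int a; };
//   struct B : virtual A {};
//   struct C : virtual A {};
//   struct D : B, C {};
// the walk over D's bases reaches A twice as a virtual base, so the second
// visit sets VMI_DiamondShaped and __flags becomes 0x2 (__diamond_shaped_mask).
// Reaching the same base twice non-virtually (or once virtually and once
// non-virtually) yields VMI_NonDiamondRepeat (0x1) instead.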
3981 
3982 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3983 /// classes with bases that do not satisfy the abi::__si_class_type_info
3984 /// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
BuildVMIClassTypeInfo(const CXXRecordDecl * RD)3985 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3986   llvm::Type *UnsignedIntLTy =
3987     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3988 
3989   // Itanium C++ ABI 2.9.5p6c:
3990   //   __flags is a word with flags describing details about the class
3991   //   structure, which may be referenced by using the __flags_masks
3992   //   enumeration. These flags refer to both direct and indirect bases.
3993   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3994   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3995 
3996   // Itanium C++ ABI 2.9.5p6c:
3997   //   __base_count is a word with the number of direct proper base class
3998   //   descriptions that follow.
3999   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4000 
4001   if (!RD->getNumBases())
4002     return;
4003 
4004   // Now add the base class descriptions.
4005 
4006   // Itanium C++ ABI 2.9.5p6c:
4007   //   __base_info[] is an array of base class descriptions -- one for every
4008   //   direct proper base. Each description is of the type:
4009   //
4010   //   struct abi::__base_class_type_info {
4011   //   public:
4012   //     const __class_type_info *__base_type;
4013   //     long __offset_flags;
4014   //
4015   //     enum __offset_flags_masks {
4016   //       __virtual_mask = 0x1,
4017   //       __public_mask = 0x2,
4018   //       __offset_shift = 8
4019   //     };
4020   //   };
4021 
4022   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4023   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4024   // LLP64 platforms.
4025   // FIXME: Consider updating libc++abi to match, and extend this logic to all
4026   // LLP64 platforms.
4027   QualType OffsetFlagsTy = CGM.getContext().LongTy;
4028   const TargetInfo &TI = CGM.getContext().getTargetInfo();
4029   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
4030     OffsetFlagsTy = CGM.getContext().LongLongTy;
4031   llvm::Type *OffsetFlagsLTy =
4032       CGM.getTypes().ConvertType(OffsetFlagsTy);
4033 
4034   for (const auto &Base : RD->bases()) {
4035     // The __base_type member points to the RTTI for the base type.
4036     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4037 
4038     auto *BaseDecl =
4039         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4040 
4041     int64_t OffsetFlags = 0;
4042 
4043     // All but the lower 8 bits of __offset_flags are a signed offset.
4044     // For a non-virtual base, this is the offset in the object of the base
4045     // subobject. For a virtual base, this is the offset in the virtual table of
4046     // the virtual base offset for the virtual base referenced (negative).
4047     CharUnits Offset;
4048     if (Base.isVirtual())
4049       Offset =
4050         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4051     else {
4052       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4053       Offset = Layout.getBaseClassOffset(BaseDecl);
4054     }
4055 
4056     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4057 
4058     // The low-order byte of __offset_flags contains flags, as given by the
4059     // masks from the enumeration __offset_flags_masks.
4060     if (Base.isVirtual())
4061       OffsetFlags |= BCTI_Virtual;
4062     if (Base.getAccessSpecifier() == AS_public)
4063       OffsetFlags |= BCTI_Public;
4064 
4065     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4066   }
4067 }
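
// Worked example of the __offset_flags encoding (a sketch; offsets assume a
// typical LP64 Itanium layout):
//   struct A { virtual ~A(); long x; };   // 16 bytes: vptr + long
//   struct B { virtual ~B(); long y; };
//   struct C : public A, public B {};
// Base A: offset 0,  public, non-virtual -> (0  << 8) | __public_mask = 0x2
// Base B: offset 16, public, non-virtual -> (16 << 8) | __public_mask = 0x1002
// A virtual base instead stores the (negative) offset of its virtual-base-
// offset slot in the vtable and also sets __virtual_mask (0x1).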
4068 
4069 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4070 /// pieces from \p Type.
extractPBaseFlags(ASTContext & Ctx,QualType & Type)4071 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4072   unsigned Flags = 0;
4073 
4074   if (Type.isConstQualified())
4075     Flags |= ItaniumRTTIBuilder::PTI_Const;
4076   if (Type.isVolatileQualified())
4077     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4078   if (Type.isRestrictQualified())
4079     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4080   Type = Type.getUnqualifiedType();
4081 
4082   // Itanium C++ ABI 2.9.5p7:
4083   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
4084   //   incomplete class type, the incomplete target type flag is set.
4085   if (ContainsIncompleteClassType(Type))
4086     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4087 
4088   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4089     if (Proto->isNothrow()) {
4090       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4091       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4092     }
4093   }
4094 
4095   return Flags;
4096 }
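
// For illustration: a pointee of 'const volatile int' yields
// PTI_Const | PTI_Volatile (0x1 | 0x2) and is stripped down to 'int', so the
// __pointee field built below refers to the type_info for plain 'int'.  A
// pointee of 'void () noexcept' additionally sets PTI_Noexcept and drops the
// exception specification, so __pointee refers to the type_info for 'void ()'.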
4097 
4098 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4099 /// used for pointer types.
BuildPointerTypeInfo(QualType PointeeTy)4100 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4101   // Itanium C++ ABI 2.9.5p7:
4102   //   __flags is a flag word describing the cv-qualification and other
4103   //   attributes of the type pointed to
4104   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4105 
4106   llvm::Type *UnsignedIntLTy =
4107     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4108   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4109 
4110   // Itanium C++ ABI 2.9.5p7:
4111   //  __pointee is a pointer to the std::type_info derivation for the
4112   //  unqualified type being pointed to.
4113   llvm::Constant *PointeeTypeInfo =
4114       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4115   Fields.push_back(PointeeTypeInfo);
4116 }
4117 
4118 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4119 /// struct, used for member pointer types.
4120 void
BuildPointerToMemberTypeInfo(const MemberPointerType * Ty)4121 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4122   QualType PointeeTy = Ty->getPointeeType();
4123 
4124   // Itanium C++ ABI 2.9.5p7:
4125   //   __flags is a flag word describing the cv-qualification and other
4126   //   attributes of the type pointed to.
4127   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4128 
4129   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4130   if (IsIncompleteClassType(ClassType))
4131     Flags |= PTI_ContainingClassIncomplete;
4132 
4133   llvm::Type *UnsignedIntLTy =
4134     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4135   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4136 
4137   // Itanium C++ ABI 2.9.5p7:
4138   //   __pointee is a pointer to the std::type_info derivation for the
4139   //   unqualified type being pointed to.
4140   llvm::Constant *PointeeTypeInfo =
4141       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4142   Fields.push_back(PointeeTypeInfo);
4143 
4144   // Itanium C++ ABI 2.9.5p9:
4145   //   __context is a pointer to an abi::__class_type_info corresponding to the
4146   //   class type containing the member pointed to
4147   //   (e.g., the "A" in "int A::*").
4148   Fields.push_back(
4149       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4150 }
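
// For illustration: for 'int A::*' with 'struct A' complete, the fields are
//   __flags   = 0
//   __pointee = @_ZTIi      ; type_info for 'int'
//   __context = @_ZTI1A     ; type_info for 'A'
// If A were only forward-declared, __flags would also carry
// PTI_ContainingClassIncomplete and the whole descriptor would get internal
// linkage (see getTypeInfoLinkage above).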
4151 
getAddrOfRTTIDescriptor(QualType Ty)4152 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4153   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4154 }
4155 
EmitFundamentalRTTIDescriptors(const CXXRecordDecl * RD)4156 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4157   // Types added here must also be added to TypeInfoIsInStandardLibrary.
4158   QualType FundamentalTypes[] = {
4159       getContext().VoidTy,             getContext().NullPtrTy,
4160       getContext().BoolTy,             getContext().WCharTy,
4161       getContext().CharTy,             getContext().UnsignedCharTy,
4162       getContext().SignedCharTy,       getContext().ShortTy,
4163       getContext().UnsignedShortTy,    getContext().IntTy,
4164       getContext().UnsignedIntTy,      getContext().LongTy,
4165       getContext().UnsignedLongTy,     getContext().LongLongTy,
4166       getContext().UnsignedLongLongTy, getContext().Int128Ty,
4167       getContext().UnsignedInt128Ty,   getContext().HalfTy,
4168       getContext().FloatTy,            getContext().DoubleTy,
4169       getContext().LongDoubleTy,       getContext().Float128Ty,
4170       getContext().Char8Ty,            getContext().Char16Ty,
4171       getContext().Char32Ty
4172   };
4173   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4174       RD->hasAttr<DLLExportAttr>()
4175       ? llvm::GlobalValue::DLLExportStorageClass
4176       : llvm::GlobalValue::DefaultStorageClass;
4177   llvm::GlobalValue::VisibilityTypes Visibility =
4178       CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4179   for (const QualType &FundamentalType : FundamentalTypes) {
4180     QualType PointerType = getContext().getPointerType(FundamentalType);
4181     QualType PointerTypeConst = getContext().getPointerType(
4182         FundamentalType.withConst());
4183     for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4184       ItaniumRTTIBuilder(*this).BuildTypeInfo(
4185           Type, llvm::GlobalValue::ExternalLinkage,
4186           Visibility, DLLStorageClass);
4187   }
4188 }
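
// For illustration: the 'int' entry alone produces three externally visible
// descriptors, @_ZTIi (int), @_ZTIPi (int *) and @_ZTIPKi (const int *), and
// likewise for every other entry in FundamentalTypes.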
4189 
4190 /// What sort of uniqueness rules should we use for the RTTI for the
4191 /// given type?
classifyRTTIUniqueness(QualType CanTy,llvm::GlobalValue::LinkageTypes Linkage) const4192 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4193     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4194   if (shouldRTTIBeUnique())
4195     return RUK_Unique;
4196 
4197   // It's only necessary for linkonce_odr or weak_odr linkage.
4198   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4199       Linkage != llvm::GlobalValue::WeakODRLinkage)
4200     return RUK_Unique;
4201 
4202   // It's only necessary with default visibility.
4203   if (CanTy->getVisibility() != DefaultVisibility)
4204     return RUK_Unique;
4205 
4206   // If we're not required to publish this symbol, hide it.
4207   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4208     return RUK_NonUniqueHidden;
4209 
4210   // If we're required to publish this symbol, as we might be under an
4211   // explicit instantiation, leave it with default visibility but
4212   // enable string-comparisons.
4213   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4214   return RUK_NonUniqueVisible;
4215 }
4216 
4217 // Decide how the complete-object constructor/destructor should be emitted.
4218 namespace {
4219 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4220 }
getCodegenToUse(CodeGenModule & CGM,const CXXMethodDecl * MD)4221 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4222                                        const CXXMethodDecl *MD) {
4223   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4224     return StructorCodegen::Emit;
4225 
4226   // The complete and base structors are not equivalent if there are any virtual
4227   // bases, so emit separate functions.
4228   if (MD->getParent()->getNumVBases())
4229     return StructorCodegen::Emit;
4230 
4231   GlobalDecl AliasDecl;
4232   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4233     AliasDecl = GlobalDecl(DD, Dtor_Complete);
4234   } else {
4235     const auto *CD = cast<CXXConstructorDecl>(MD);
4236     AliasDecl = GlobalDecl(CD, Ctor_Complete);
4237   }
4238   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4239 
4240   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4241     return StructorCodegen::RAUW;
4242 
4243   // FIXME: Should we allow available_externally aliases?
4244   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4245     return StructorCodegen::RAUW;
4246 
4247   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4248     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4249     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4250         CGM.getTarget().getTriple().isOSBinFormatWasm())
4251       return StructorCodegen::COMDAT;
4252     return StructorCodegen::Emit;
4253   }
4254 
4255   return StructorCodegen::Alias;
4256 }
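
// Illustrative sketch (IR shown with typed pointers; exact linkage varies):
// with ctor/dtor aliases enabled and no virtual bases, for
//   struct S { S(); ~S(); };   // defined in this TU
// the complete-object constructor becomes an alias of the base-object one:
//   @_ZN1SC1Ev = unnamed_addr alias void (%struct.S*),
//                                   void (%struct.S*)* @_ZN1SC2Ev
// On ELF and wasm, weak structors are instead emitted once under a shared
// C5/D5 comdat; otherwise the RAUW or plain-emit paths above are taken.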
4257 
emitConstructorDestructorAlias(CodeGenModule & CGM,GlobalDecl AliasDecl,GlobalDecl TargetDecl)4258 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4259                                            GlobalDecl AliasDecl,
4260                                            GlobalDecl TargetDecl) {
4261   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4262 
4263   StringRef MangledName = CGM.getMangledName(AliasDecl);
4264   llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4265   if (Entry && !Entry->isDeclaration())
4266     return;
4267 
4268   auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4269 
4270   // Create the alias with no name.
4271   auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4272 
4273   // Constructors and destructors are always unnamed_addr.
4274   Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4275 
4276   // Switch any previous uses to the alias.
4277   if (Entry) {
4278     assert(Entry->getType() == Aliasee->getType() &&
4279            "declaration exists with different type");
4280     Alias->takeName(Entry);
4281     Entry->replaceAllUsesWith(Alias);
4282     Entry->eraseFromParent();
4283   } else {
4284     Alias->setName(MangledName);
4285   }
4286 
4287   // Finally, set up the alias with its proper name and attributes.
4288   CGM.SetCommonAttributes(AliasDecl, Alias);
4289 }
4290 
emitCXXStructor(GlobalDecl GD)4291 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4292   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4293   auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4294   const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4295 
4296   StructorCodegen CGType = getCodegenToUse(CGM, MD);
4297 
4298   if (CD ? GD.getCtorType() == Ctor_Complete
4299          : GD.getDtorType() == Dtor_Complete) {
4300     GlobalDecl BaseDecl;
4301     if (CD)
4302       BaseDecl = GD.getWithCtorType(Ctor_Base);
4303     else
4304       BaseDecl = GD.getWithDtorType(Dtor_Base);
4305 
4306     if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4307       emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4308       return;
4309     }
4310 
4311     if (CGType == StructorCodegen::RAUW) {
4312       StringRef MangledName = CGM.getMangledName(GD);
4313       auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4314       CGM.addReplacement(MangledName, Aliasee);
4315       return;
4316     }
4317   }
4318 
4319   // The base destructor is equivalent to the base destructor of its
4320   // base class if there is exactly one non-virtual base class with a
4321   // non-trivial destructor, there are no fields with a non-trivial
4322   // destructor, and the body of the destructor is trivial.
4323   if (DD && GD.getDtorType() == Dtor_Base &&
4324       CGType != StructorCodegen::COMDAT &&
4325       !CGM.TryEmitBaseDestructorAsAlias(DD))
4326     return;
4327 
4328   // FIXME: The deleting destructor is equivalent to the selected operator
4329   // delete if:
4330   //  * either the delete is a destroying operator delete or the destructor
4331   //    would be trivial if it weren't virtual,
4332   //  * the conversion from the 'this' parameter to the first parameter of the
4333   //    destructor is equivalent to a bitcast,
4334   //  * the destructor does not have an implicit "this" return, and
4335   //  * the operator delete has the same calling convention and IR function type
4336   //    as the destructor.
4337   // In such cases we should try to emit the deleting dtor as an alias to the
4338   // selected 'operator delete'.
4339 
4340   llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4341 
4342   if (CGType == StructorCodegen::COMDAT) {
4343     SmallString<256> Buffer;
4344     llvm::raw_svector_ostream Out(Buffer);
4345     if (DD)
4346       getMangleContext().mangleCXXDtorComdat(DD, Out);
4347     else
4348       getMangleContext().mangleCXXCtorComdat(CD, Out);
4349     llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4350     Fn->setComdat(C);
4351   } else {
4352     CGM.maybeSetTrivialComdat(*MD, *Fn);
4353   }
4354 }
4355 
getBeginCatchFn(CodeGenModule & CGM)4356 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4357   // void *__cxa_begin_catch(void*);
4358   llvm::FunctionType *FTy = llvm::FunctionType::get(
4359       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4360 
4361   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4362 }
4363 
getEndCatchFn(CodeGenModule & CGM)4364 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4365   // void __cxa_end_catch();
4366   llvm::FunctionType *FTy =
4367       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4368 
4369   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4370 }
4371 
getGetExceptionPtrFn(CodeGenModule & CGM)4372 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4373   // void *__cxa_get_exception_ptr(void*);
4374   llvm::FunctionType *FTy = llvm::FunctionType::get(
4375       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4376 
4377   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4378 }
4379 
4380 namespace {
4381   /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4382   /// exception type lets us state definitively that the thrown exception
4383   /// type does not have a destructor.  In particular:
4384   ///   - Catch-alls tell us nothing, so we have to conservatively
4385   ///     assume that the thrown exception might have a destructor.
4386   ///   - Catches by reference behave according to their base types.
4387   ///   - Catches of non-record types will only trigger for exceptions
4388   ///     of non-record types, which never have destructors.
4389   ///   - Catches of record types can trigger for arbitrary subclasses
4390   ///     of the caught type, so we have to assume the actual thrown
4391   ///     exception type might have a throwing destructor, even if the
4392   ///     caught type's destructor is trivial or nothrow.
4393   struct CallEndCatch final : EHScopeStack::Cleanup {
CallEndCatch__anon87ecd9350911::CallEndCatch4394     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4395     bool MightThrow;
4396 
Emit__anon87ecd9350911::CallEndCatch4397     void Emit(CodeGenFunction &CGF, Flags flags) override {
4398       if (!MightThrow) {
4399         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4400         return;
4401       }
4402 
4403       CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4404     }
4405   };
4406 }
4407 
4408 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4409 /// __cxa_end_catch.
4410 ///
4411 /// \param EndMightThrow - true if __cxa_end_catch might throw
CallBeginCatch(CodeGenFunction & CGF,llvm::Value * Exn,bool EndMightThrow)4412 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4413                                    llvm::Value *Exn,
4414                                    bool EndMightThrow) {
4415   llvm::CallInst *call =
4416     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4417 
4418   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4419 
4420   return call;
4421 }
4422 
4423 /// A "special initializer" callback for initializing a catch
4424 /// parameter during catch initialization.
InitCatchParam(CodeGenFunction & CGF,const VarDecl & CatchParam,Address ParamAddr,SourceLocation Loc)4425 static void InitCatchParam(CodeGenFunction &CGF,
4426                            const VarDecl &CatchParam,
4427                            Address ParamAddr,
4428                            SourceLocation Loc) {
4429   // Load the exception from where the landing pad saved it.
4430   llvm::Value *Exn = CGF.getExceptionFromSlot();
4431 
4432   CanQualType CatchType =
4433     CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4434   llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4435 
4436   // If we're catching by reference, we can just cast the object
4437   // pointer to the appropriate pointer.
4438   if (isa<ReferenceType>(CatchType)) {
4439     QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4440     bool EndCatchMightThrow = CaughtType->isRecordType();
4441 
4442     // __cxa_begin_catch returns the adjusted object pointer.
4443     llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4444 
4445     // We have no way to tell the personality function that we're
4446     // catching by reference, so if we're catching a pointer,
4447     // __cxa_begin_catch will actually return that pointer by value.
4448     if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4449       QualType PointeeType = PT->getPointeeType();
4450 
4451       // When catching by reference, generally we should just ignore
4452       // this by-value pointer and use the exception object instead.
4453       if (!PointeeType->isRecordType()) {
4454 
4455         // Exn points to the struct _Unwind_Exception header, which
4456         // we have to skip past in order to reach the exception data.
4457         unsigned HeaderSize =
4458           CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4459         AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
4460 
4461       // However, if we're catching a pointer-to-record type that won't
4462       // work, because the personality function might have adjusted
4463       // the pointer.  There's actually no way for us to fully satisfy
4464       // the language/ABI contract here:  we can't use Exn because it
4465       // might have the wrong adjustment, but we can't use the by-value
4466       // pointer because it's off by a level of abstraction.
4467       //
4468       // The current solution is to dump the adjusted pointer into an
4469       // alloca, which breaks language semantics (because changing the
4470       // pointer doesn't change the exception) but at least works.
4471       // The better solution would be to filter out non-exact matches
4472       // and rethrow them, but this is tricky because the rethrow
4473       // really needs to be catchable by other sites at this landing
4474       // pad.  The best solution is to fix the personality function.
4475       } else {
4476         // Pull the pointer for the reference type off.
4477         llvm::Type *PtrTy =
4478           cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
4479 
4480         // Create the temporary and write the adjusted pointer into it.
4481         Address ExnPtrTmp =
4482           CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4483         llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4484         CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4485 
4486         // Bind the reference to the temporary.
4487         AdjustedExn = ExnPtrTmp.getPointer();
4488       }
4489     }
4490 
4491     llvm::Value *ExnCast =
4492       CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4493     CGF.Builder.CreateStore(ExnCast, ParamAddr);
4494     return;
4495   }
4496 
4497   // Scalars and complexes.
4498   TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4499   if (TEK != TEK_Aggregate) {
4500     llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4501 
4502     // If the catch type is a pointer type, __cxa_begin_catch returns
4503     // the pointer by value.
4504     if (CatchType->hasPointerRepresentation()) {
4505       llvm::Value *CastExn =
4506         CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4507 
4508       switch (CatchType.getQualifiers().getObjCLifetime()) {
4509       case Qualifiers::OCL_Strong:
4510         CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4511         LLVM_FALLTHROUGH;
4512 
4513       case Qualifiers::OCL_None:
4514       case Qualifiers::OCL_ExplicitNone:
4515       case Qualifiers::OCL_Autoreleasing:
4516         CGF.Builder.CreateStore(CastExn, ParamAddr);
4517         return;
4518 
4519       case Qualifiers::OCL_Weak:
4520         CGF.EmitARCInitWeak(ParamAddr, CastExn);
4521         return;
4522       }
4523       llvm_unreachable("bad ownership qualifier!");
4524     }
4525 
4526     // Otherwise, it returns a pointer into the exception object.
4527 
4528     llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4529     llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4530 
4531     LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4532     LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4533     switch (TEK) {
4534     case TEK_Complex:
4535       CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4536                              /*init*/ true);
4537       return;
4538     case TEK_Scalar: {
4539       llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4540       CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4541       return;
4542     }
4543     case TEK_Aggregate:
4544       llvm_unreachable("evaluation kind filtered out!");
4545     }
4546     llvm_unreachable("bad evaluation kind");
4547   }
4548 
4549   assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4550   auto catchRD = CatchType->getAsCXXRecordDecl();
4551   CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4552 
4553   llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4554 
4555   // Check for a copy expression.  If we don't have a copy expression,
4556   // that means a trivial copy is okay.
4557   const Expr *copyExpr = CatchParam.getInit();
4558   if (!copyExpr) {
4559     llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4560     Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4561                         caughtExnAlignment);
4562     LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4563     LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4564     CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4565     return;
4566   }
4567 
4568   // We have to call __cxa_get_exception_ptr to get the adjusted
4569   // pointer before copying.
4570   llvm::CallInst *rawAdjustedExn =
4571     CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4572 
4573   // Cast that to the appropriate type.
4574   Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4575                       caughtExnAlignment);
4576 
4577   // The copy expression is defined in terms of an OpaqueValueExpr.
4578   // Find it and map it to the adjusted expression.
4579   CodeGenFunction::OpaqueValueMapping
4580     opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4581            CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4582 
4583   // Call the copy ctor in a terminate scope.
4584   CGF.EHStack.pushTerminate();
4585 
4586   // Perform the copy construction.
4587   CGF.EmitAggExpr(copyExpr,
4588                   AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4589                                         AggValueSlot::IsNotDestructed,
4590                                         AggValueSlot::DoesNotNeedGCBarriers,
4591                                         AggValueSlot::IsNotAliased,
4592                                         AggValueSlot::DoesNotOverlap));
4593 
4594   // Leave the terminate scope.
4595   CGF.EHStack.popTerminate();
4596 
4597   // Undo the opaque value mapping.
4598   opaque.pop();
4599 
4600   // Finally we can call __cxa_begin_catch.
4601   CallBeginCatch(CGF, Exn, true);
4602 }
4603 
4604 /// Begins a catch statement by initializing the catch variable and
4605 /// calling __cxa_begin_catch.
emitBeginCatch(CodeGenFunction & CGF,const CXXCatchStmt * S)4606 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4607                                    const CXXCatchStmt *S) {
4608   // We have to be very careful with the ordering of cleanups here:
4609   //   C++ [except.throw]p4:
4610   //     The destruction [of the exception temporary] occurs
4611   //     immediately after the destruction of the object declared in
4612   //     the exception-declaration in the handler.
4613   //
4614   // So the precise ordering is:
4615   //   1.  Construct catch variable.
4616   //   2.  __cxa_begin_catch
4617   //   3.  Enter __cxa_end_catch cleanup
4618   //   4.  Enter dtor cleanup
4619   //
4620   // We do this by using a slightly abnormal initialization process.
4621   // Delegation sequence:
4622   //   - ExitCXXTryStmt opens a RunCleanupsScope
4623   //     - EmitAutoVarAlloca creates the variable and debug info
4624   //       - InitCatchParam initializes the variable from the exception
4625   //       - CallBeginCatch calls __cxa_begin_catch
4626   //       - CallBeginCatch enters the __cxa_end_catch cleanup
4627   //     - EmitAutoVarCleanups enters the variable destructor cleanup
4628   //   - EmitCXXTryStmt emits the code for the catch body
4629   //   - EmitCXXTryStmt closes the RunCleanupsScope
4630 
4631   VarDecl *CatchParam = S->getExceptionDecl();
4632   if (!CatchParam) {
4633     llvm::Value *Exn = CGF.getExceptionFromSlot();
4634     CallBeginCatch(CGF, Exn, true);
4635     return;
4636   }
4637 
4638   // Emit the local.
4639   CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4640   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4641   CGF.EmitAutoVarCleanups(var);
4642 }
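
// Roughly, for 'catch (const std::exception &e)' (caught by reference, so the
// adjusted pointer is bound directly to 'e') the emitted code looks like the
// sketch below; the value names are illustrative only:
//
//   %exn = load i8*, i8** %exn.slot
//   %0   = call i8* @__cxa_begin_catch(i8* %exn)    ; nounwind
//   ; %0 is bitcast and stored as the reference 'e'
//   ; a cleanup calling __cxa_end_catch() on both the normal and the EH path
//   ; is now active for the duration of the handler body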
4643 
4644 /// Get or define the following function:
4645 ///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4646 /// This code is used only in C++.
getClangCallTerminateFn(CodeGenModule & CGM)4647 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4648   llvm::FunctionType *fnTy =
4649     llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4650   llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4651       fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4652   llvm::Function *fn =
4653       cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4654   if (fn->empty()) {
4655     fn->setDoesNotThrow();
4656     fn->setDoesNotReturn();
4657 
4658     // What we really want is to massively penalize inlining without
4659     // forbidding it completely.  The difference between that and
4660     // 'noinline' is negligible.
4661     fn->addFnAttr(llvm::Attribute::NoInline);
4662 
4663     // Allow this function to be shared across translation units, but
4664     // we don't want it to turn into an exported symbol.
4665     fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4666     fn->setVisibility(llvm::Function::HiddenVisibility);
4667     if (CGM.supportsCOMDAT())
4668       fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4669 
4670     // Set up the function.
4671     llvm::BasicBlock *entry =
4672         llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4673     CGBuilderTy builder(CGM, entry);
4674 
4675     // Pull the exception pointer out of the parameter list.
4676     llvm::Value *exn = &*fn->arg_begin();
4677 
4678     // Call __cxa_begin_catch(exn).
4679     llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4680     catchCall->setDoesNotThrow();
4681     catchCall->setCallingConv(CGM.getRuntimeCC());
4682 
4683     // Call std::terminate().
4684     llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4685     termCall->setDoesNotThrow();
4686     termCall->setDoesNotReturn();
4687     termCall->setCallingConv(CGM.getRuntimeCC());
4688 
4689     // std::terminate cannot return.
4690     builder.CreateUnreachable();
4691   }
4692   return fnRef;
4693 }
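
// The helper built above is equivalent to (a sketch in source form):
//
//   extern "C" void __clang_call_terminate(void *exn) { // linkonce_odr, hidden
//     __cxa_begin_catch(exn);  // mark the exception handled
//     std::terminate();        // never returns
//   }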
4694 
4695 llvm::CallInst *
emitTerminateForUnexpectedException(CodeGenFunction & CGF,llvm::Value * Exn)4696 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4697                                                    llvm::Value *Exn) {
4698   // In C++, we want to call __cxa_begin_catch() before terminating.
4699   if (Exn) {
4700     assert(CGF.CGM.getLangOpts().CPlusPlus);
4701     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4702   }
4703   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4704 }
4705 
4706 std::pair<llvm::Value *, const CXXRecordDecl *>
LoadVTablePtr(CodeGenFunction & CGF,Address This,const CXXRecordDecl * RD)4707 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4708                              const CXXRecordDecl *RD) {
4709   return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4710 }
4711 
emitBeginCatch(CodeGenFunction & CGF,const CXXCatchStmt * C)4712 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4713                                        const CXXCatchStmt *C) {
4714   if (CGF.getTarget().hasFeature("exception-handling"))
4715     CGF.EHStack.pushCleanup<CatchRetScope>(
4716         NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4717   ItaniumCXXABI::emitBeginCatch(CGF, C);
4718 }
4719 
4720 llvm::CallInst *
emitTerminateForUnexpectedException(CodeGenFunction & CGF,llvm::Value * Exn)4721 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4722                                                        llvm::Value *Exn) {
4723   // The Itanium ABI calls __clang_call_terminate(), which calls
4724   // __cxa_begin_catch() on the violating exception to mark it handled. That is
4725   // currently hard to do with the wasm EH instructions (catch/catch_all), so we
4726   // just call std::terminate and ignore the violating exception, as in CGCXXABI.
4727   // TODO: Consider a code transformation that would make calling
4728   // __clang_call_terminate possible.
4729   return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4730 }
4731 
4732 /// Register a global destructor as best as we know how.
registerGlobalDtor(CodeGenFunction & CGF,const VarDecl & D,llvm::FunctionCallee dtor,llvm::Constant * addr)4733 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4734                                   llvm::FunctionCallee dtor,
4735                                   llvm::Constant *addr) {
4736   if (D.getTLSKind() != VarDecl::TLS_None)
4737     llvm::report_fatal_error("thread local storage not yet implemented on AIX");
4738 
4739   // Create __dtor function for the var decl.
4740   llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr);
4741 
4742   // Register above __dtor with atexit().
4743   CGF.registerGlobalDtorWithAtExit(dtorStub);
4744 
4745   // Emit __finalize function to unregister __dtor and (as appropriate) call
4746   // __dtor.
4747   emitCXXStermFinalizer(D, dtorStub, addr);
4748 }
4749 
emitCXXStermFinalizer(const VarDecl & D,llvm::Function * dtorStub,llvm::Constant * addr)4750 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4751                                      llvm::Constant *addr) {
4752   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4753   SmallString<256> FnName;
4754   {
4755     llvm::raw_svector_ostream Out(FnName);
4756     getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4757   }
4758 
4759   // Create the finalization action associated with a variable.
4760   const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4761   llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4762       FTy, FnName.str(), FI, D.getLocation());
4763 
4764   CodeGenFunction CGF(CGM);
4765 
4766   CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4767                     FunctionArgList(), D.getLocation(),
4768                     D.getInit()->getExprLoc());
4769 
4770   // The unatexit subroutine unregisters __dtor functions that were previously
4771   // registered by the atexit subroutine. If the referenced function is found,
4772   // unatexit returns a value of 0, meaning that the cleanup is still
4773   // pending (and we should call the __dtor function).
4774   llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4775 
4776   llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4777 
4778   llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4779   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4780 
4781   // Check if unatexit returns a value of 0. If it does, jump to
4782   // DestructCallBlock, otherwise jump to EndBlock directly.
4783   CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4784 
4785   CGF.EmitBlock(DestructCallBlock);
4786 
4787   // Emit the call to dtorStub.
4788   llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4789 
4790   // Make sure the call and the callee agree on calling convention.
4791   CI->setCallingConv(dtorStub->getCallingConv());
4792 
4793   CGF.EmitBlock(EndBlock);
4794 
4795   CGF.FinishFunction();
4796 
4797   if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4798     CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4799                                              IPA->getPriority());
4800   } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4801              getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4802     // According to C++ [basic.start.init]p2, class template static data
4803     // members (i.e., implicitly or explicitly instantiated specializations)
4804     // have unordered initialization. As a consequence, we can put them into
4805     // their own llvm.global_dtors entry.
4806     CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4807   } else {
4808     CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4809   }
4810 }
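
// In source form, the sterm finalizer built above behaves like the sketch
// below ('__dtor' stands for the dtorStub; both names here are illustrative,
// the real symbol comes from mangleDynamicStermFinalizer):
//
//   void __sterm_finalizer() {
//     if (unatexit(__dtor) == 0)   // 0 => still registered, cleanup pending
//       __dtor();
//   }
//
// It is then scheduled via the init-priority list, llvm.global_dtors (for
// unordered template instantiations), or the default sterm finalizer list.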
4811