//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  http://www.codesourcery.com/public/cxx-abi/abi.html
//  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
  /// VTables - All the vtables which have been defined.
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;

  /// All the thread wrapper functions that have been used.
  llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
      ThreadWrappers;

protected:
  bool UseARMMethodPtrABI;
  bool UseARMGuardVarABI;
  bool Use32BitVTableOffsetABI;

  ItaniumMangleContext &getMangleContext() {
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
  }

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
                bool UseARMMethodPtrABI = false,
                bool UseARMGuardVarABI = false) :
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
    UseARMGuardVarABI(UseARMGuardVarABI),
    Use32BitVTableOffsetABI(false) { }

  bool classifyReturnType(CGFunctionInfo &FI) const override;

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
    // If C++ prohibits us from making a copy, pass by address.
    if (!RD->canPassInRegisters())
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isThisCompleteObject(GlobalDecl GD) const override {
    // The Itanium ABI has separate complete-object vs. base-object
    // variants of both constructors and destructors.
    if (isa<CXXDestructorDecl>(GD.getDecl())) {
      switch (GD.getDtorType()) {
      case Dtor_Complete:
      case Dtor_Deleting:
        return true;

      case Dtor_Base:
        return false;

      case Dtor_Comdat:
        llvm_unreachable("emitting dtor comdat as function?");
      }
      llvm_unreachable("bad dtor kind");
    }
    if (isa<CXXConstructorDecl>(GD.getDecl())) {
      switch (GD.getCtorType()) {
      case Ctor_Complete:
        return true;

      case Ctor_Base:
        return false;

      case Ctor_CopyingClosure:
      case Ctor_DefaultClosure:
        llvm_unreachable("closure ctors in Itanium ABI?");

      case Ctor_Comdat:
        llvm_unreachable("emitting ctor comdat as function?");
      }
      llvm_unreachable("bad ctor kind");
    }

    // No other kinds.
    return false;
  }

  bool isZeroInitializable(const MemberPointerType *MPT) override;

  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;

  CGCallee
    EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                    const Expr *E,
                                    Address This,
                                    llvm::Value *&ThisPtrForCall,
                                    llvm::Value *MemFnPtr,
                                    const MemberPointerType *MPT) override;

  llvm::Value *
    EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
                                 Address Base,
                                 llvm::Value *MemPtr,
                                 const MemberPointerType *MPT) override;

  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *Src) override;
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                              llvm::Constant *Src) override;

  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;

  llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset) override;
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                     CharUnits ThisAdjustment);

  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L, llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) override;

  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *Addr,
                                          const MemberPointerType *MPT) override;

  void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
                               Address Ptr, QualType ElementType,
                               const CXXDestructorDecl *Dtor) override;

  void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
  void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;

  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

  void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
  llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
  CatchTypeInfo
  getAddrOfCXXCatchHandlerType(QualType Ty,
                               QualType CatchHandlerType) override {
    return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
  }

  bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
  void EmitBadTypeidCall(CodeGenFunction &CGF) override;
  llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
                          Address ThisPtr,
                          llvm::Type *StdTypeInfoPtrTy) override;

  bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                          QualType SrcRecordTy) override;

  llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
                                   QualType SrcRecordTy, QualType DestTy,
                                   QualType DestRecordTy,
                                   llvm::BasicBlock *CastEnd) override;

  llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
                                     QualType SrcRecordTy,
                                     QualType DestTy) override;

  bool EmitBadCastCall(CodeGenFunction &CGF) override;

  llvm::Value *
    GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
                              const CXXRecordDecl *ClassDecl,
                              const CXXRecordDecl *BaseClassDecl) override;

  void EmitCXXConstructors(const CXXConstructorDecl *D) override;

  AddedStructorArgCounts
  buildStructorSignature(GlobalDecl GD,
                         SmallVectorImpl<CanQualType> &ArgTys) override;

  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
                              CXXDtorType DT) const override {
    // Itanium does not emit any destructor variant as an inline thunk.
    // Delegating may occur as an optimization, but all variants are either
    // emitted with external linkage or as linkonce if they are inline and used.
    return false;
  }

  void EmitCXXDestructors(const CXXDestructorDecl *D) override;

  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
                                 FunctionArgList &Params) override;

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;

  AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
                                               const CXXConstructorDecl *D,
                                               CXXCtorType Type,
                                               bool ForVirtualBase,
                                               bool Delegating) override;

  llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
                                             const CXXDestructorDecl *DD,
                                             CXXDtorType Type,
                                             bool ForVirtualBase,
                                             bool Delegating) override;

  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
                          CXXDtorType Type, bool ForVirtualBase,
                          bool Delegating, Address This,
                          QualType ThisTy) override;

  void emitVTableDefinitions(CodeGenVTables &CGVT,
                             const CXXRecordDecl *RD) override;

  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
                                           CodeGenFunction::VPtr Vptr) override;

  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
    return true;
  }

  llvm::Constant *
  getVTableAddressPoint(BaseSubobject Base,
                        const CXXRecordDecl *VTableClass) override;

  llvm::Value *getVTableAddressPointInStructor(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;

  llvm::Value *getVTableAddressPointInStructorWithVTT(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);

  llvm::Constant *
  getVTableAddressPointForConstExpr(BaseSubobject Base,
                                    const CXXRecordDecl *VTableClass) override;

  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
                                        CharUnits VPtrOffset) override;

  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
                                     Address This, llvm::Type *Ty,
                                     SourceLocation Loc) override;

  llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                         const CXXDestructorDecl *Dtor,
                                         CXXDtorType DtorType, Address This,
                                         DeleteOrMemberCallExpr E) override;

  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;

  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
  bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;

  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
                       bool ReturnAdjustment) override {
    // Allow inlining of thunks by emitting them with available_externally
    // linkage together with vtables when needed.
    if (ForVTable && !Thunk->hasLocalLinkage())
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
    CGM.setGVProperties(Thunk, GD);
  }

  bool exportThunk() override { return true; }

  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const ThisAdjustment &TA) override;

  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) override;

  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
                              FunctionArgList &Args) const override {
    assert(!Args.empty() && "expected the arglist to not be empty!");
    return Args.size() - 1;
  }

  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() override
    { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   Address allocPtr,
                                   CharUnits cookieSize) override;

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr,
                       bool PerformInit) override;
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                llvm::Value *Val);
  void EmitThreadLocalInitFuncs(
      CodeGenModule &CGM,
      ArrayRef<const VarDecl *> CXXThreadLocals,
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;

  /// Determine whether we will definitely emit this variable with a constant
  /// initializer, either because the language semantics demand it or because
  /// we know that the initializer is a constant.
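  ///
  /// Illustrative sketch (editorial example, not taken from the ABI or this
  /// file): a translation-unit-local
  ///   thread_local constinit int Counter = 0;
  /// is emitted with a constant initializer and needs no destruction, so the
  /// thread wrapper below can be avoided, whereas a weak or dynamically
  /// initialized thread_local still goes through the wrapper.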
  bool isEmittedWithConstantInitializer(const VarDecl *VD) const {
    VD = VD->getMostRecentDecl();
    if (VD->hasAttr<ConstInitAttr>())
      return true;

    // All later checks examine the initializer specified on the variable. If
    // the variable is weak, such examination would not be correct.
    if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>())
      return false;

    const VarDecl *InitDecl = VD->getInitializingDeclaration();
    if (!InitDecl)
      return false;

    // If there's no initializer to run, this is constant initialization.
    if (!InitDecl->hasInit())
      return true;

    // If we have the only definition, we don't need a thread wrapper if we
    // will emit the value as a constant.
    if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
      return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();

    // Otherwise, we need a thread wrapper unless we know that every
    // translation unit will emit the value as a constant. We rely on
    // ICE-ness not varying between translation units, which isn't actually
    // guaranteed by the standard but is necessary for sanity.
    return InitDecl->isInitKnownICE() && InitDecl->isInitICE();
  }

  bool usesThreadWrapperFunction(const VarDecl *VD) const override {
    return !isEmittedWithConstantInitializer(VD) ||
           VD->needsDestruction(getContext());
  }
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
                                      QualType LValType) override;

  bool NeedsVTTParameter(GlobalDecl GD) override;

  /**************************** RTTI Uniqueness ******************************/

protected:
  /// Returns true if the ABI requires RTTI type_info objects to be unique
  /// across a program.
  virtual bool shouldRTTIBeUnique() const { return true; }

public:
  /// What sort of unique-RTTI behavior should we use?
  enum RTTIUniquenessKind {
    /// We are guaranteeing, or need to guarantee, that the RTTI string
    /// is unique.
    RUK_Unique,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// can demote to hidden visibility but must use string comparisons.
    RUK_NonUniqueHidden,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// have to use string comparisons, but we also have to emit it with
    /// non-hidden visibility.
    RUK_NonUniqueVisible
  };

  /// Return the required visibility status for the given type and linkage in
  /// the current ABI.
  RTTIUniquenessKind
  classifyRTTIUniqueness(QualType CanTy,
                         llvm::GlobalValue::LinkageTypes Linkage) const;
  friend class ItaniumRTTIBuilder;

  void emitCXXStructor(GlobalDecl GD) override;

  std::pair<llvm::Value *, const CXXRecordDecl *>
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
                const CXXRecordDecl *RD) override;

 private:
   bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
     const auto &VtableLayout =
         CGM.getItaniumVTableContext().getVTableLayout(RD);

     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
       // Skip empty slot.
       if (!VtableComponent.isUsedFunctionPointerKind())
         continue;

       const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
       if (!Method->getCanonicalDecl()->isInlined())
         continue;

       StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
       auto *Entry = CGM.GetGlobalValue(Name);
       // This checks whether the virtual inline function has already been
       // emitted. Note that such an inline function may only be emitted after
       // we have tried to emit the vtable speculatively; because of this we do
       // an extra pass after emitting all deferred vtables to find and emit
       // these vtables opportunistically.
       if (!Entry || Entry->isDeclaration())
         return true;
     }
     return false;
  }

  bool isVTableHidden(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
            CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      if (VtableComponent.isRTTIKind()) {
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
          return true;
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
            !Method->isDefined())
          return true;
      }
    }
    return false;
  }
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  bool HasThisReturn(GlobalDecl GD) const override {
    return (isa<CXXConstructorDecl>(GD.getDecl()) || (
              isa<CXXDestructorDecl>(GD.getDecl()) &&
              GD.getDtorType() != Dtor_Deleting));
  }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};

class iOS64CXXABI : public ARMCXXABI {
public:
  iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};

class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool HasThisReturn(GlobalDecl GD) const override {
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
            GD.getDtorType() != Dtor_Deleting);
  }
};

class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

private:
  bool HasThisReturn(GlobalDecl GD) const override {
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
            GD.getDtorType() != Dtor_Deleting);
  }
  bool canCallMismatchedFunctionType() const override { return false; }
};

class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getTarget().getCXXABI().getKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
    return new ARMCXXABI(CGM);

  case TargetCXXABI::iOS64:
    return new iOS64CXXABI(CGM);

  case TargetCXXABI::Fuchsia:
    return new FuchsiaCXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return
  // values and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                             /*UseARMGuardVarABI=*/true);

  case TargetCXXABI::GenericMIPS:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);

  case TargetCXXABI::WebAssembly:
    return new WebAssemblyCXXABI(CGM);

  case TargetCXXABI::XL:
    return new XLCXXABI(CGM);

  case TargetCXXABI::GenericItanium:
    if (CGM.getContext().getTargetInfo().getTriple().getArch()
        == llvm::Triple::le32) {
      // For PNaCl, use ARM-style method pointers so that PNaCl code
      // does not assume anything about the alignment of function
      // pointers.
      return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
    }
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  if (getContext().getTargetInfo().areAllPointersCapabilities()) {
    return llvm::StructType::get(CGM.VoidPtrTy, CGM.PtrDiffTy);
  }
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// XXXAR: TODO: add a CHERI ABI similar to ARM but using tag bit
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
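///
/// Illustrative sketch (hypothetical class; exact values depend on the
/// target): given
///   struct S { virtual void f(); void g(); };
/// with 8-byte vtable slots and f at vtable index 0, the Itanium encoding of
/// '&S::f' is roughly { ptr = 0 + 1, adj = 0 } and that of '&S::g' is
/// { ptr = (ptrdiff_t)&S::g, adj = 0 }; under the ARM variant '&S::f' becomes
/// { ptr = 0, adj = 0*2 + 1 } instead.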
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
    llvm::Value *&ThisPtrForCall,
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));

  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);

  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");

  // Extract memptr.adj, which is in the second field.
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");

  // Compute the true adjustment.
  llvm::Value *Adj = RawAdj;
  if (UseARMMethodPtrABI)
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");

  unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
  // Apply the adjustment and cast back to the original struct type
  // for consistency.
  llvm::Value *This = ThisAddr.getPointer();
  llvm::Value *VTableAddr = Builder.CreateBitCast(This, CGM.Int8PtrTy,
                                                  "this.not.adjusted");
  VTableAddr = Builder.CreateInBoundsGEP(VTableAddr, Adj, "memptr.vtable.addr");
  This = Builder.CreateBitCast(VTableAddr, This->getType(), "this.adjusted");
  ThisPtrForCall = This;

  // Load the function pointer.
  llvm::Value *FnPtr = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");

  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
  // TODO: use new CHERI CXX ABI
  bool IsCheriABI = getContext().getTargetInfo().areAllPointersCapabilities();
  llvm::Value *IsVirtual;
  if (UseARMMethodPtrABI) // XXXAR: TODO: use tag bit instead
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
  else {
    llvm::Value *FnPtrAddr;
    if (CGF.CGM.getDataLayout().isFatPointer(FnPtr->getType()))
      FnPtrAddr =
          CGF.getTargetHooks().getPointerAddress(CGF, FnPtr, "memptr.ptr.addr");
    else
      FnPtrAddr = FnPtr;

    IsVirtual = Builder.CreateAnd(FnPtrAddr, ptrdiff_1);
  }
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);

  // In the virtual path, the adjustment left 'This' pointing to the
  // vtable of the correct base subobject.  The "function pointer" is an
  // offset within the vtable (+1 for the virtual flag on non-ARM).
  CGF.EmitBlock(FnVirtual);

  // Cast the adjusted this to a pointer to vtable pointer and load.
  llvm::Type *VTableTy = CGM.Int8PtrTy;
  CharUnits VTablePtrAlign =
    CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
                                      CGF.getPointerAlign());
  llvm::Value *VTable =
    CGF.GetVTablePtr(Address(VTableAddr, VTablePtrAlign), VTableTy, RD);

  // Apply the offset.
  // On ARM64, to reserve extra space in virtual member function pointers,
  // we only pay attention to the low 32 bits of the offset.
  llvm::Value *VTableOffset =
      Builder.CreatePtrToInt(FnPtr, CGM.PtrDiffTy, "memptr.vtable.offset");
  if (!UseARMMethodPtrABI)
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
  if (Use32BitVTableOffsetABI) {
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
  }

  // Check the address of the function pointer if CFI on member function
  // pointers is enabled.
  llvm::Constant *CheckSourceLocation;
  llvm::Constant *CheckTypeDesc;
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
                            CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
                           CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitWPDInfo =
      CGM.getCodeGenOpts().WholeProgramVTables &&
      // Don't insert type tests if we are forcing public std visibility.
      !CGM.HasLTOVisibilityPublicStd(RD);
  llvm::Value *VirtualFn = nullptr;

  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *TypeId = nullptr;
    llvm::Value *CheckResult = nullptr;

    if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
      // If doing CFI, VFE or WPD, we will need the metadata node to check
      // against.
      llvm::Metadata *MD =
          CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
      TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
    }

    if (ShouldEmitVFEInfo) {
      llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);

      // If doing VFE, load from the vtable with a type.checked.load intrinsic
      // call. Note that we use the GEP to calculate the address to load from
      // and pass 0 as the offset to the intrinsic. This is because every
      // vtable slot of the correct type is marked with matching metadata, and
      // we know that the load must be from one of these slots.
      llvm::Value *CheckedLoad = Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
          {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
      CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
      VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
      VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(DefaultAS),
                                        "memptr.virtualfn");
    } else {
      // When not doing VFE, emit a normal load, as it allows more
      // optimisations than type.checked.load.
      if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
        llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
        CheckResult = Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::type_test),
            {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
      }

      if (CGM.getItaniumVTableContext().isRelativeLayout()) {
        VirtualFn = CGF.Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::load_relative,
                             {VTableOffset->getType()}),
            {VTable, VTableOffset});
        VirtualFn =
            CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(DefaultAS));
      } else {
        llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset);
        VFPAddr = CGF.Builder.CreateBitCast(
            VFPAddr, FTy->getPointerTo(DefaultAS)->getPointerTo(DefaultAS));
        VirtualFn = CGF.Builder.CreateAlignedLoad(
            VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
      }
    }
    assert(VirtualFn && "Virtual function pointer not created!");
    assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
            CheckResult) &&
           "Check result required but not created!");

    if (ShouldEmitCFICheck) {
      // If doing CFI, emit the check.
      CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
      CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
        CGF.EmitTrapCheck(CheckResult);
      } else {
        llvm::Value *AllVtables = llvm::MetadataAsValue::get(
            CGM.getLLVMContext(),
            llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
        llvm::Value *ValidVtable = Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
        CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
                      SanitizerHandler::CFICheckFail, StaticData,
                      {VTable, ValidVtable});
      }

      FnVirtual = Builder.GetInsertBlock();
    }
  } // End of sanitizer scope

  CGF.EmitBranch(FnEnd);

  // In the non-virtual path, the function pointer is actually a
  // function pointer.
  CGF.EmitBlock(FnNonVirtual);
  llvm::Value *NonVirtualFn;
  if (IsCheriABI) {
    NonVirtualFn = Builder.CreatePointerCast(FnPtr, FTy->getPointerTo(DefaultAS),
                                             "memptr.nonvirtualfn");
  } else {
    NonVirtualFn = Builder.CreateIntToPtr(FnPtr, FTy->getPointerTo(DefaultAS),
                                          "memptr.nonvirtualfn");
  }

  // Check the function pointer if CFI on member function pointers is enabled.
  if (ShouldEmitCFICheck) {
    CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
    if (RD->hasDefinition()) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      llvm::Value *Bit = Builder.getFalse();
      llvm::Value *CastedNonVirtualFn =
          Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
            getContext().getMemberPointerType(
                MPT->getPointeeType(),
                getContext().getRecordType(Base).getTypePtr()));
        llvm::Value *TypeId =
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);

        llvm::Value *TypeTest =
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                               {CastedNonVirtualFn, TypeId});
        Bit = Builder.CreateOr(Bit, TypeTest);
      }

      CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
                    SanitizerHandler::CFICheckFail, StaticData,
                    {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});

      FnNonVirtual = Builder.GetInsertBlock();
    }
  }

  // We're done.
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(DefaultAS), 2);
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);

  CGCallee Callee(FPT, CalleePtr);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Cast to char*.
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);

  // Apply the offset, which we assume is non-null.
  llvm::Value *Addr =
    Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
                            ->getPointerTo(Base.getAddressSpace());
  return Builder.CreateBitCast(Addr, PType);
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
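///
/// Worked example (hypothetical types, for illustration only):
///   struct A { int a; };
///   struct B { int b; };
///   struct C : A, B { };
/// Converting 'int B::*' to 'int C::*' (base-to-derived) adds the offset of
/// the B subobject within C (typically 4 here) to any non-null source value
/// and leaves the null value -1 unchanged; the derived-to-base direction
/// subtracts the same offset.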
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // XXXAR: FIXME: CHERI

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // XXXAR: FIXME: CHERI

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Null =
      CGM.getContext().getTargetInfo().areAllPointersCapabilities() ?
      llvm::ConstantPointerNull::get(CGM.VoidPtrTy) : Zero;
  llvm::Constant *Values[2] = { Null, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
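  //
  // For example (illustrative only, assuming a 4-byte int): for
  //   struct S { int a; int b; };
  // '&S::b' is emitted here as the ptrdiff_t constant 4, while the null data
  // member pointer (see EmitNullMemberPointer above) is -1.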
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();
  const TargetInfo &TI = CGM.getContext().getTargetInfo();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(0));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    auto getVtableOffsetAsPointer = [&](uint64_t Offset) {
      llvm::Constant *Value = llvm::ConstantInt::get(CGM.PtrDiffTy, Offset);
      if (TI.areAllPointersCapabilities()) {
        return CGM.getNullDerivedConstantCapability(CGM.VoidPtrTy, Value);
      }
      return Value;
    };
    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = getVtableOffsetAsPointer(VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = getVtableOffsetAsPointer(VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    if (TI.areAllPointersCapabilities()) {
      MemPtr[0] = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          addr, CGM.VoidPtrTy);
    } else {
      MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    }
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }
  auto Result = llvm::ConstantStruct::getAnon(MemPtr, false);
  // TODO: how to set the required alignment?
  return Result;
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
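///
/// Illustrative cases (editorial note): two copies of the same non-virtual
/// member pointer compare equal because both fields match bitwise; two null
/// member function pointers compare equal even if their 'adj' fields differ
/// (e.g. after a derived-to-base conversion of null), because the
/// (L.ptr == 0) disjunct below ignores 'adj'; on ARM the extra low-bit test
/// on 'adj' is needed because a null pointer may still carry a nonzero,
/// even this-adjustment.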
1152 llvm::Value *
EmitMemberPointerComparison(CodeGenFunction & CGF,llvm::Value * L,llvm::Value * R,const MemberPointerType * MPT,bool Inequality)1153 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1154                                            llvm::Value *L,
1155                                            llvm::Value *R,
1156                                            const MemberPointerType *MPT,
1157                                            bool Inequality) {
1158   CGBuilderTy &Builder = CGF.Builder;
1159 
1160   llvm::ICmpInst::Predicate Eq;
1161   llvm::Instruction::BinaryOps And, Or;
1162   if (Inequality) {
1163     Eq = llvm::ICmpInst::ICMP_NE;
1164     And = llvm::Instruction::Or;
1165     Or = llvm::Instruction::And;
1166   } else {
1167     Eq = llvm::ICmpInst::ICMP_EQ;
1168     And = llvm::Instruction::And;
1169     Or = llvm::Instruction::Or;
1170   }
1171 
1172   // Member data pointers are easy because there's a unique null
1173   // value, so it just comes down to bitwise equality.
1174   if (MPT->isMemberDataPointer())
1175     return Builder.CreateICmp(Eq, L, R);
1176 
1177   // For member function pointers, the tautologies are more complex.
1178   // The Itanium tautology is:
1179   //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1180   // The ARM tautology is:
1181   //   (L == R) <==> (L.ptr == R.ptr &&
1182   //                  (L.adj == R.adj ||
1183   //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1184   // The inequality tautologies have exactly the same structure, except
1185   // applying De Morgan's laws.
1186 
1187   llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1188   llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1189 
1190   // This condition tests whether L.ptr == R.ptr.  This must always be
1191   // true for equality to hold.
1192   llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1193 
1194   // This condition, together with the assumption that L.ptr == R.ptr,
1195   // tests whether the pointers are both null.  ARM imposes an extra
1196   // condition.
1197   llvm::Value *Null = llvm::Constant::getNullValue(LPtr->getType());
1198   llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Null, "cmp.ptr.null");
1199 
1200   // This condition tests whether L.adj == R.adj.  If this isn't
1201   // true, the pointers are unequal unless they're both null.
1202   llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1203   llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1204   llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1205 
1206   // XXXAR: we should just use the tag bit instead of checking low bit in adj
1207 
1208   // Null member function pointers on ARM clear the low bit of Adj,
1209   // so the zero condition has to check that neither low bit is set.
1210   if (UseARMMethodPtrABI) {
1211     llvm::Value *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1212     llvm::Value *One = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
1213 
1214     // Compute (l.adj | r.adj) & 1 and test it against zero.
1215     llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1216     llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1217     llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1218                                                       "cmp.or.adj");
1219     EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1220   }
1221 
1222   // Tie together all our conditions.
1223   llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1224   Result = Builder.CreateBinOp(And, PtrEq, Result,
1225                                Inequality ? "memptr.ne" : "memptr.eq");
1226   return Result;
1227 }
1228 
1229 llvm::Value *
EmitMemberPointerIsNotNull(CodeGenFunction & CGF,llvm::Value * MemPtr,const MemberPointerType * MPT)1230 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1231                                           llvm::Value *MemPtr,
1232                                           const MemberPointerType *MPT) {
1233   CGBuilderTy &Builder = CGF.Builder;
1234 
1235   /// For member data pointers, this is just a check against -1.
1236   if (MPT->isMemberDataPointer()) {
1237     assert(MemPtr->getType() == CGM.PtrDiffTy);
1238     llvm::Value *NegativeOne =
1239       llvm::Constant::getAllOnesValue(MemPtr->getType());
1240     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1241   }
1242 
1243   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1244   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1245   llvm::Value *Result = Builder.CreateIsNotNull(Ptr, "memptr.tobool");
1246 
1247   // XXXAR: we should just use the tag bit instead of checking adj
1248 
1249   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1250   // (the virtual bit) is set.
1251   if (UseARMMethodPtrABI) {
1252     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1253     assert(Adj->getType() == CGM.PtrDiffTy);
1254     llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1255     llvm::Constant *One = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
1256     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1257     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1258                                                   "memptr.isvirtual");
1259     Result = Builder.CreateOr(Result, IsVirtual, "memptr.isnonnull");
1260   }
1261 
1262   return Result;
1263 }
1264 
1265 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1266   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1267   if (!RD)
1268     return false;
1269 
1270   // If C++ prohibits us from making a copy, return by address.
1271   if (!RD->canPassInRegisters()) {
1272     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1273     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1274     return true;
1275   }
1276   return false;
1277 }
1278 
1279 /// The Itanium ABI requires non-zero initialization only for data
1280 /// member pointers, for which '0' is a valid offset.
1281 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1282   return MPT->isMemberFunctionPointer();
1283 }
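// Illustrative example of the rule above (hypothetical class, not emitted
// code): for
//   struct S { int x; void f(); };
// a null 'int S::*' is encoded as -1, because offset 0 (&S::x) is a valid
// member offset, whereas a null 'void (S::*)()' is {ptr = 0, adj = 0} and is
// therefore zero-initializable.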
1284 
1285 /// The Itanium ABI always places an offset to the complete object
1286 /// at entry -2 in the vtable.
1287 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1288                                             const CXXDeleteExpr *DE,
1289                                             Address Ptr,
1290                                             QualType ElementType,
1291                                             const CXXDestructorDecl *Dtor) {
1292   bool UseGlobalDelete = DE->isGlobalDelete();
1293   if (UseGlobalDelete) {
1294     // Derive the complete-object pointer, which is what we need
1295     // to pass to the deallocation function.
1296 
1297     // Grab the vtable pointer as an intptr_t*.
1298     // XXXAR: is this correct or is it ptrdiff_t?
1299     unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
1300     auto *ClassDecl =
1301         cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1302     llvm::Value *VTable =
1303         CGF.GetVTablePtr(Ptr, CGF.PtrDiffTy->getPointerTo(DefaultAS), ClassDecl);
1304 
1305     // Track back to entry -2 and pull out the offset there.
1306     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1307         VTable, -2, "complete-offset.ptr");
1308     llvm::Value *Offset =
1309       CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1310 
1311     // Apply the offset.
1312     llvm::Value *CompletePtr =
1313       CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1314     CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1315 
1316     // If we're supposed to call the global delete, make sure we do so
1317     // even if the destructor throws.
1318     CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1319                                     ElementType);
1320   }
1321 
1322   // FIXME: Provide a source location here even though there's no
1323   // CXXMemberCallExpr for dtor call.
1324   CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1325   EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1326 
1327   if (UseGlobalDelete)
1328     CGF.PopCleanupBlock();
1329 }
1330 
1331 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1332   // void __cxa_rethrow();
1333 
1334   llvm::FunctionType *FTy =
1335     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1336 
1337   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1338 
1339   if (isNoReturn)
1340     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1341   else
1342     CGF.EmitRuntimeCallOrInvoke(Fn);
1343 }
1344 
1345 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1346   // void *__cxa_allocate_exception(size_t thrown_size);
1347 
1348   llvm::FunctionType *FTy =
1349     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1350 
1351   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1352 }
1353 
1354 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1355   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1356   //                  void (*dest) (void *));
1357 
1358   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1359   llvm::FunctionType *FTy =
1360     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1361 
1362   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1363 }
1364 
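// Illustrative sketch of the runtime sequence emitThrow builds below for a
// statement like 'throw E();' (pseudocode, not emitted symbols):
//   void *exn = __cxa_allocate_exception(sizeof(E));
//   ... copy-construct the E object into exn ...
//   __cxa_throw(exn, typeinfo-for-E, dtor-for-E-or-null);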
1365 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1366   QualType ThrowType = E->getSubExpr()->getType();
1367   // Now allocate the exception object.
1368   llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1369   uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1370 
1371   llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1372   llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1373       AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1374 
1375   CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1376   CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1377 
1378   // Now throw the exception.
1379   llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1380                                                          /*ForEH=*/true);
1381 
1382   // The address of the destructor.  If the exception type has a
1383   // trivial destructor (or isn't a record), we just pass null.
1384   llvm::Constant *Dtor = nullptr;
1385   if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1386     CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1387     if (!Record->hasTrivialDestructor()) {
1388       CXXDestructorDecl *DtorD = Record->getDestructor();
1389       Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1390       Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1391     }
1392   }
1393   if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1394 
1395   llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1396   CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1397 }
1398 
1399 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1400   // void *__dynamic_cast(const void *sub,
1401   //                      const abi::__class_type_info *src,
1402   //                      const abi::__class_type_info *dst,
1403   //                      std::ptrdiff_t src2dst_offset);
1404 
1405   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1406   llvm::Type *PtrDiffTy =
1407     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1408 
1409   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1410 
1411   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1412 
1413   // Mark the function as nounwind readonly.
1414   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1415                                             llvm::Attribute::ReadOnly };
1416   llvm::AttributeList Attrs = llvm::AttributeList::get(
1417       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1418 
1419   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1420 }
1421 
1422 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1423   // void __cxa_bad_cast();
1424   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1425   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1426 }
1427 
1428 /// Compute the src2dst_offset hint as described in the
1429 /// Itanium C++ ABI [2.9.7]
1430 static CharUnits computeOffsetHint(ASTContext &Context,
1431                                    const CXXRecordDecl *Src,
1432                                    const CXXRecordDecl *Dst) {
1433   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1434                      /*DetectVirtual=*/false);
1435 
1436   // If Dst is not derived from Src we can skip the whole computation below and
1437   // return that Src is not a public base of Dst. This also records all paths.
1438   if (!Dst->isDerivedFrom(Src, Paths))
1439     return CharUnits::fromQuantity(-2ULL);
1440 
1441   unsigned NumPublicPaths = 0;
1442   CharUnits Offset;
1443 
1444   // Now walk all possible inheritance paths.
1445   for (const CXXBasePath &Path : Paths) {
1446     if (Path.Access != AS_public)  // Ignore non-public inheritance.
1447       continue;
1448 
1449     ++NumPublicPaths;
1450 
1451     for (const CXXBasePathElement &PathElement : Path) {
1452       // If the path contains a virtual base class we can't give any hint.
1453       // -1: no hint.
1454       if (PathElement.Base->isVirtual())
1455         return CharUnits::fromQuantity(-1ULL);
1456 
1457       if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1458         continue;
1459 
1460       // Accumulate the base class offsets.
1461       const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1462       Offset += L.getBaseClassOffset(
1463           PathElement.Base->getType()->getAsCXXRecordDecl());
1464     }
1465   }
1466 
1467   // -2: Src is not a public base of Dst.
1468   if (NumPublicPaths == 0)
1469     return CharUnits::fromQuantity(-2ULL);
1470 
1471   // -3: Src is a multiple public base type but never a virtual base type.
1472   if (NumPublicPaths > 1)
1473     return CharUnits::fromQuantity(-3ULL);
1474 
1475   // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1476   // Return the offset of Src from the origin of Dst.
1477   return Offset;
1478 }
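// Illustrative hint values for hypothetical classes (sketch, not emitted
// code), matching the cases above:
//   struct A {};  struct B : A {};           // hint = offset of A in B (0)
//   struct C : virtual A {};                 // hint = -1 (virtual base on path)
//   struct D : private A {};                 // hint = -2 (not a public base)
//   struct E : A {};  struct F : B, E {};    // hint = -3 (two public paths to A)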
1479 
1480 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1481   // void __cxa_bad_typeid();
1482   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1483 
1484   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1485 }
1486 
1487 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1488                                               QualType SrcRecordTy) {
1489   return IsDeref;
1490 }
1491 
1492 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1493   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1494   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1495   Call->setDoesNotReturn();
1496   CGF.Builder.CreateUnreachable();
1497 }
1498 
1499 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1500                                        QualType SrcRecordTy,
1501                                        Address ThisPtr,
1502                                        llvm::Type *StdTypeInfoPtrTy) {
1503   auto *ClassDecl =
1504       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1505   unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
1506   llvm::Value *Value =
1507       CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(DefaultAS),
1508                        ClassDecl);
1509 
1510   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1511     // Load the type info.
1512     Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
1513     Value = CGF.Builder.CreateCall(
1514         CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1515         {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1516 
1517     // Set up to dereference again since this is a proxy we accessed.
1518     Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
1519   } else {
1520     // Load the type info.
1521     Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1522   }
1523   return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
1524 }
1525 
1526 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1527                                                        QualType SrcRecordTy) {
1528   return SrcIsPtr;
1529 }
1530 
1531 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1532     CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1533     QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1534   llvm::Type *PtrDiffLTy =
1535       CGF.ConvertType(CGF.getContext().getPointerDiffType());
1536   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1537 
1538   llvm::Value *SrcRTTI =
1539       CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1540   llvm::Value *DestRTTI =
1541       CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1542 
1543   // Compute the offset hint.
1544   const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1545   const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1546   llvm::Value *OffsetHint = llvm::ConstantInt::get(
1547       PtrDiffLTy,
1548       computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1549 
1550   // Emit the call to __dynamic_cast.
1551   llvm::Value *Value = ThisAddr.getPointer();
1552   Value = CGF.EmitCastToVoidPtr(Value);
1553 
1554   llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1555   Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1556   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1557 
1558   /// C++ [expr.dynamic.cast]p9:
1559   ///   A failed cast to reference type throws std::bad_cast
1560   if (DestTy->isReferenceType()) {
1561     llvm::BasicBlock *BadCastBlock =
1562         CGF.createBasicBlock("dynamic_cast.bad_cast");
1563 
1564     llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1565     CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1566 
1567     CGF.EmitBlock(BadCastBlock);
1568     EmitBadCastCall(CGF);
1569   }
1570 
1571   return Value;
1572 }
1573 
1574 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1575                                                   Address ThisAddr,
1576                                                   QualType SrcRecordTy,
1577                                                   QualType DestTy) {
1578   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1579   auto *ClassDecl =
1580       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1581   unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
1582   llvm::Value *OffsetToTop;
1583   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1584     // Get the vtable pointer.
1585     llvm::Value *VTable =
1586         CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(DefaultAS), ClassDecl);
1587 
1588     // Get the offset-to-top from the vtable.
1589     OffsetToTop =
1590         CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
1591     OffsetToTop = CGF.Builder.CreateAlignedLoad(
1592         OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1593   } else {
1594     llvm::Type *PtrDiffLTy =
1595         CGF.ConvertType(CGF.getContext().getPointerDiffType());
1596 
1597     // Get the vtable pointer.
1598     llvm::Value *VTable =
1599         CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(DefaultAS), ClassDecl);
1600 
1601     // Get the offset-to-top from the vtable.
1602     OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1603     OffsetToTop = CGF.Builder.CreateAlignedLoad(
1604         OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1605   }
1606   // Finally, add the offset to the pointer.
1607   llvm::Value *Value = ThisAddr.getPointer();
1608   Value = CGF.EmitCastToVoidPtr(Value);
1609   Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1610   return CGF.Builder.CreateBitCast(Value, DestLTy);
1611 }
1612 
1613 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1614   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1615   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1616   Call->setDoesNotReturn();
1617   CGF.Builder.CreateUnreachable();
1618   return true;
1619 }
1620 
1621 llvm::Value *
1622 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
1623                                          const CXXRecordDecl *ClassDecl,
1624                                          const CXXRecordDecl *BaseClassDecl) {
1625   llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1626   CharUnits VBaseOffsetOffset =
1627       CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1628                                                                BaseClassDecl);
1629   llvm::Value *VBaseOffsetPtr =
1630     CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1631                                    "vbase.offset.ptr");
1632 
1633   llvm::Value *VBaseOffset;
1634   unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
1635   bool IsPurecap =
1636       CGF.getContext().getTargetInfo().areAllPointersCapabilities();
1637   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1638     VBaseOffsetPtr =
1639         CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo(DefaultAS));
1640     VBaseOffset = CGF.Builder.CreateAlignedLoad(
1641         VBaseOffsetPtr, CharUnits::fromQuantity(4), "vbase.offset");
1642   } else {
1643     VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1644                                                IsPurecap ? CGM.Int8PtrPtrTy : CGM.PtrDiffTy->getPointerTo(DefaultAS));
1645     VBaseOffset = CGF.Builder.CreateAlignedLoad(
1646         VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1647     if (IsPurecap)
1648       VBaseOffset = CGF.getCapabilityIntegerValue(VBaseOffset);
1649   }
1650   return VBaseOffset;
1651 }
1652 
1653 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1654   // Just make sure we're in sync with TargetCXXABI.
1655   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1656 
1657   // The constructor used for constructing this as a base class;
1658   // ignores virtual bases.
1659   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1660 
1661   // The constructor used for constructing this as a complete class;
1662   // constructs the virtual bases, then calls the base constructor.
1663   if (!D->getParent()->isAbstract()) {
1664     // We don't need to emit the complete ctor if the class is abstract.
1665     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1666   }
1667 }
1668 
1669 CGCXXABI::AddedStructorArgCounts
1670 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1671                                       SmallVectorImpl<CanQualType> &ArgTys) {
1672   ASTContext &Context = getContext();
1673 
1674   // All parameters are already in place except VTT, which goes after 'this'.
1675   // These are Clang types, so we don't need to worry about sret yet.
1676 
1677   // Check if we need to add a VTT parameter (which has type void **).
1678   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1679                                              : GD.getDtorType() == Dtor_Base) &&
1680       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1681     ArgTys.insert(ArgTys.begin() + 1,
1682                   Context.getPointerType(Context.VoidPtrTy));
1683     return AddedStructorArgCounts::prefix(1);
1684   }
1685   return AddedStructorArgCounts{};
1686 }
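// Illustrative sketch (hypothetical class, not emitted code): for
//   struct B {};  struct D : virtual B { D(); };
// the base-object constructor effectively gets the signature
//   D::D(D *this, void **vtt, ...)
// while the complete-object constructor takes no VTT parameter.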
1687 
1688 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1689   // The destructor used for destructing this as a base class; ignores
1690   // virtual bases.
1691   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1692 
1693   // The destructor used for destructing this as a most-derived class;
1694   // calls the base destructor and then destructs any virtual bases.
1695   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1696 
1697   // The destructor in a virtual table is always a 'deleting'
1698   // destructor, which calls the complete destructor and then uses the
1699   // appropriate operator delete.
1700   if (D->isVirtual())
1701     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1702 }
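// Illustrative note (hypothetical class): for 'struct X { virtual ~X(); };'
// this emits the base-object (D2), complete-object (D1) and, since ~X is
// virtual, deleting (D0) variants, e.g. the Itanium-mangled symbols
// _ZN1XD2Ev, _ZN1XD1Ev and _ZN1XD0Ev.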
1703 
1704 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1705                                               QualType &ResTy,
1706                                               FunctionArgList &Params) {
1707   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1708   assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1709 
1710   // Check if we need a VTT parameter as well.
1711   if (NeedsVTTParameter(CGF.CurGD)) {
1712     ASTContext &Context = getContext();
1713 
1714     // FIXME: avoid the fake decl
1715     QualType T = Context.getPointerType(Context.VoidPtrTy);
1716     auto *VTTDecl = ImplicitParamDecl::Create(
1717         Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1718         T, ImplicitParamDecl::CXXVTT);
1719     Params.insert(Params.begin() + 1, VTTDecl);
1720     getStructorImplicitParamDecl(CGF) = VTTDecl;
1721   }
1722 }
1723 
1724 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1725   // Naked functions have no prolog.
1726   if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1727     return;
1728 
1729   /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1730   /// adjustments are required, because they are all handled by thunks.
1731   setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1732 
1733   /// Initialize the 'vtt' slot if needed.
1734   if (getStructorImplicitParamDecl(CGF)) {
1735     getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1736         CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1737   }
1738 
1739   /// If this is a function that the ABI specifies returns 'this', initialize
1740   /// the return slot to 'this' at the start of the function.
1741   ///
1742   /// Unlike the setting of return types, this is done within the ABI
1743   /// implementation instead of by clients of CGCXXABI because:
1744   /// 1) getThisValue is currently protected
1745   /// 2) in theory, an ABI could implement 'this' returns some other way;
1746   ///    HasThisReturn only specifies a contract, not the implementation
1747   if (HasThisReturn(CGF.CurGD))
1748     CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1749 }
1750 
1751 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1752     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1753     bool ForVirtualBase, bool Delegating) {
1754   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1755     return AddedStructorArgs{};
1756 
1757   // Insert the implicit 'vtt' argument as the second argument.
1758   llvm::Value *VTT =
1759       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1760   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1761   return AddedStructorArgs::prefix({{VTT, VTTTy}});
1762 }
1763 
1764 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1765     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1766     bool ForVirtualBase, bool Delegating) {
1767   GlobalDecl GD(DD, Type);
1768   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1769 }
1770 
1771 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1772                                        const CXXDestructorDecl *DD,
1773                                        CXXDtorType Type, bool ForVirtualBase,
1774                                        bool Delegating, Address This,
1775                                        QualType ThisTy) {
1776   GlobalDecl GD(DD, Type);
1777   llvm::Value *VTT =
1778       getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1779   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1780 
1781   CGCallee Callee;
1782   if (getContext().getLangOpts().AppleKext &&
1783       Type != Dtor_Base && DD->isVirtual())
1784     Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1785   else
1786     Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1787 
1788   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1789                             nullptr);
1790 }
1791 
1792 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1793                                           const CXXRecordDecl *RD) {
1794   llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1795   if (VTable->hasInitializer())
1796     return;
1797 
1798   ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1799   const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1800   llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1801   llvm::Constant *RTTI =
1802       CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1803 
1804   // Create and set the initializer.
1805   ConstantInitBuilder builder(CGM);
1806   auto components = builder.beginStruct();
1807   CGVT.createVTableInitializer(components, VTLayout, RTTI,
1808                                llvm::GlobalValue::isLocalLinkage(Linkage));
1809   components.finishAndSetAsInitializer(VTable);
1810 
1811   // Set the correct linkage.
1812   VTable->setLinkage(Linkage);
1813 
1814   if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1815     VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1816 
1817   // Set the right visibility.
1818   CGM.setGVProperties(VTable, RD);
1819 
1820   // If this is the magic class __cxxabiv1::__fundamental_type_info,
1821   // we will emit the typeinfo for the fundamental types. This is the
1822   // same behaviour as GCC.
1823   const DeclContext *DC = RD->getDeclContext();
1824   if (RD->getIdentifier() &&
1825       RD->getIdentifier()->isStr("__fundamental_type_info") &&
1826       isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1827       cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1828       DC->getParent()->isTranslationUnit())
1829     EmitFundamentalRTTIDescriptors(RD);
1830 
1831   if (!VTable->isDeclarationForLinker())
1832     CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1833 
1834   if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
1835     CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1836 }
1837 
1838 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1839     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1840   if (Vptr.NearestVBase == nullptr)
1841     return false;
1842   return NeedsVTTParameter(CGF.CurGD);
1843 }
1844 
1845 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1846     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1847     const CXXRecordDecl *NearestVBase) {
1848 
1849   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1850       NeedsVTTParameter(CGF.CurGD)) {
1851     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1852                                                   NearestVBase);
1853   }
1854   return getVTableAddressPoint(Base, VTableClass);
1855 }
1856 
1857 llvm::Constant *
1858 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1859                                      const CXXRecordDecl *VTableClass) {
1860   llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1861 
1862   // Find the appropriate vtable within the vtable group, and the address point
1863   // within that vtable.
1864   VTableLayout::AddressPointLocation AddressPoint =
1865       CGM.getItaniumVTableContext()
1866           .getVTableLayout(VTableClass)
1867           .getAddressPoint(Base);
1868   llvm::Value *Indices[] = {
1869     llvm::ConstantInt::get(CGM.Int32Ty, 0),
1870     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1871     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1872   };
1873 
1874   return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1875                                               Indices, /*InBounds=*/true,
1876                                               /*InRangeIndex=*/1);
1877 }
1878 
1879 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1880     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1881     const CXXRecordDecl *NearestVBase) {
1882   assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1883          NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1884 
1885   // Get the secondary vpointer index.
1886   uint64_t VirtualPointerIndex =
1887       CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1888 
1889   /// Load the VTT.
1890   llvm::Value *VTT = CGF.LoadCXXVTT();
1891   if (VirtualPointerIndex)
1892     VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1893 
1894   // And load the address point from the VTT.
1895   return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
1896 }
1897 
1898 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1899     BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1900   return getVTableAddressPoint(Base, VTableClass);
1901 }
1902 
1903 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1904                                                      CharUnits VPtrOffset) {
1905   assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1906 
1907   llvm::GlobalVariable *&VTable = VTables[RD];
1908   if (VTable)
1909     return VTable;
1910 
1911   // Queue up this vtable for possible deferred emission.
1912   CGM.addDeferredVTable(RD);
1913 
1914   SmallString<256> Name;
1915   llvm::raw_svector_ostream Out(Name);
1916   getMangleContext().mangleCXXVTable(RD, Out);
1917 
1918   const VTableLayout &VTLayout =
1919       CGM.getItaniumVTableContext().getVTableLayout(RD);
1920   llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1921 
1922   // Use pointer alignment for the vtable. Otherwise we would align them based
1923   // on the size of the initializer which doesn't make sense as only single
1924   // values are read.
1925   unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1926                         ? 32
1927                         : CGM.getTarget().getPointerAlign(0);
1928 
1929   VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1930       Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1931       getContext().toCharUnitsFromBits(PAlign).getQuantity());
1932   VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1933 
1934   CGM.setGVProperties(VTable, RD);
1935 
1936   return VTable;
1937 }
1938 
1939 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1940                                                   GlobalDecl GD,
1941                                                   Address This,
1942                                                   llvm::Type *Ty,
1943                                                   SourceLocation Loc) {
1944   unsigned AS = CGM.getDataLayout().getProgramAddressSpace();
1945   auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1946   llvm::Value *VTable = CGF.GetVTablePtr(
1947       This, Ty->getPointerTo(AS)->getPointerTo(AS), MethodDecl->getParent());
1948 
1949   uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1950   llvm::Value *VFunc;
1951   if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1952     VFunc = CGF.EmitVTableTypeCheckedLoad(
1953         MethodDecl->getParent(), VTable,
1954         VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1955   } else {
1956     CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1957 
1958     llvm::Value *VFuncLoad;
1959     if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1960       VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
1961       llvm::Value *Load = CGF.Builder.CreateCall(
1962           CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1963           {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
1964       VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo(AS));
1965     } else {
1966       VTable =
1967           CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo(AS)->getPointerTo(AS));
1968       llvm::Value *VTableSlotPtr =
1969           CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1970       VFuncLoad =
1971           CGF.Builder.CreateAlignedLoad(VTableSlotPtr, CGF.getPointerAlign());
1972     }
1973 
1974     // Add !invariant.load metadata to the virtual function load to indicate
1975     // that the function didn't change inside the vtable.
1976     // It's safe to add it without -fstrict-vtable-pointers, but it would not
1977     // help devirtualization, because it only matters when there are two loads
1978     // of the same virtual function from the same vtable load, which won't
1979     // happen unless devirtualization is enabled with -fstrict-vtable-pointers.
1980     if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1981         CGM.getCodeGenOpts().StrictVTablePointers) {
1982       if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
1983         VFuncLoadInstr->setMetadata(
1984             llvm::LLVMContext::MD_invariant_load,
1985             llvm::MDNode::get(CGM.getLLVMContext(),
1986                               llvm::ArrayRef<llvm::Metadata *>()));
1987       }
1988     }
1989     VFunc = VFuncLoad;
1990   }
1991 
1992   CGCallee Callee(GD, VFunc);
1993   return Callee;
1994 }
1995 
1996 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1997     CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1998     Address This, DeleteOrMemberCallExpr E) {
1999   auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2000   auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2001   assert((CE != nullptr) ^ (D != nullptr));
2002   assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2003   assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2004 
2005   GlobalDecl GD(Dtor, DtorType);
2006   const CGFunctionInfo *FInfo =
2007       &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2008   llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2009   CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2010 
2011   QualType ThisTy;
2012   if (CE) {
2013     ThisTy = CE->getObjectType();
2014   } else {
2015     ThisTy = D->getDestroyedType();
2016   }
2017 
2018   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2019                             QualType(), nullptr);
2020   return nullptr;
2021 }
2022 
2023 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2024   CodeGenVTables &VTables = CGM.getVTables();
2025   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2026   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2027 }
2028 
2029 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2030     const CXXRecordDecl *RD) const {
2031   // We don't emit available_externally vtables if we are in -fapple-kext mode
2032   // because kext mode does not permit devirtualization.
2033   if (CGM.getLangOpts().AppleKext)
2034     return false;
2035 
2036   // If the vtable is hidden then it is not safe to emit an available_externally
2037   // copy of the vtable.
2038   if (isVTableHidden(RD))
2039     return false;
2040 
2041   if (CGM.getCodeGenOpts().ForceEmitVTables)
2042     return true;
2043 
2044   // If there is no inline virtual function that has not been emitted yet, it
2045   // is safe to emit an available_externally copy of the vtable.
2046   // FIXME: we could still emit a copy of the vtable if we can emit
2047   // definitions of the inline functions.
2048   if (hasAnyUnusedVirtualInlineFunction(RD))
2049     return false;
2050 
2051   // For a class with virtual bases, we must also be able to speculatively
2052   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2053   // the vtable" and "can emit the VTT". For a base subobject, this means we
2054   // need to be able to emit non-virtual base vtables.
2055   if (RD->getNumVBases()) {
2056     for (const auto &B : RD->bases()) {
2057       auto *BRD = B.getType()->getAsCXXRecordDecl();
2058       assert(BRD && "no class for base specifier");
2059       if (B.isVirtual() || !BRD->isDynamicClass())
2060         continue;
2061       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2062         return false;
2063     }
2064   }
2065 
2066   return true;
2067 }
2068 
2069 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2070   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2071     return false;
2072 
2073   // For a complete-object vtable (or more specifically, for the VTT), we need
2074   // to be able to speculatively emit the vtables of all dynamic virtual bases.
2075   for (const auto &B : RD->vbases()) {
2076     auto *BRD = B.getType()->getAsCXXRecordDecl();
2077     assert(BRD && "no class for base specifier");
2078     if (!BRD->isDynamicClass())
2079       continue;
2080     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2081       return false;
2082   }
2083 
2084   return true;
2085 }
2086 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2087                                           Address InitialPtr,
2088                                           int64_t NonVirtualAdjustment,
2089                                           int64_t VirtualAdjustment,
2090                                           bool IsReturnAdjustment) {
2091   if (!NonVirtualAdjustment && !VirtualAdjustment)
2092     return InitialPtr.getPointer();
2093 
2094   Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2095 
2096   // In a base-to-derived cast, the non-virtual adjustment is applied first.
2097   if (NonVirtualAdjustment && !IsReturnAdjustment) {
2098     V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2099                               CharUnits::fromQuantity(NonVirtualAdjustment));
2100   }
2101 
2102   // Perform the virtual adjustment if we have one.
2103   llvm::Value *ResultPtr;
2104   if (VirtualAdjustment) {
2105     Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2106     llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2107 
2108     llvm::Value *Offset;
2109     unsigned AS = CGF.CGM.getDataLayout().getGlobalsAddressSpace();
2110     llvm::Value *OffsetPtr =
2111         CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
2112     if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2113       // Load the adjustment offset from the vtable as a 32-bit int.
2114       OffsetPtr =
2115           CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo(AS));
2116       Offset =
2117           CGF.Builder.CreateAlignedLoad(OffsetPtr, CharUnits::fromQuantity(4));
2118     } else {
2119       llvm::Type *PtrDiffTy =
2120           CGF.ConvertType(CGF.getContext().getPointerDiffType());
2121 
2122       // Load the adjustment offset from the vtable.
2123       if (CGF.getContext().getTargetInfo().areAllPointersCapabilities()) {
2124         OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int8PtrPtrTy);
2125         Offset = CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign(),
2126                                                "vbase.offset.intcap");
2127         Offset = CGF.getCapabilityIntegerValue(Offset);
2128       } else {
2129         OffsetPtr =
2130             CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo(AS));
2131 
2132         // Load the adjustment offset from the vtable.
2133         Offset =
2134             CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
2135       }
2136     }
2137     // Adjust our pointer.
2138     ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
2139   } else {
2140     ResultPtr = V.getPointer();
2141   }
2142 
2143   // In a derived-to-base conversion, the non-virtual adjustment is
2144   // applied second.
2145   if (NonVirtualAdjustment && IsReturnAdjustment) {
2146     ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
2147                                                        NonVirtualAdjustment);
2148   }
2149 
2150   // Cast back to the original type.
2151   return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
2152 }
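// Illustrative sketch of when these adjustments fire (hypothetical classes,
// not emitted code):
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { void g() override; };
// Calling g() through a B* that points at a C goes via a thunk whose 'this'
// adjustment subtracts the offset of the B subobject within C; virtual bases
// additionally need the vcall offset loaded from the vtable as above.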
2153 
2154 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2155                                                   Address This,
2156                                                   const ThisAdjustment &TA) {
2157   return performTypeAdjustment(CGF, This, TA.NonVirtual,
2158                                TA.Virtual.Itanium.VCallOffsetOffset,
2159                                /*IsReturnAdjustment=*/false);
2160 }
2161 
2162 llvm::Value *
2163 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2164                                        const ReturnAdjustment &RA) {
2165   return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2166                                RA.Virtual.Itanium.VBaseOffsetOffset,
2167                                /*IsReturnAdjustment=*/true);
2168 }
2169 
2170 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2171                                     RValue RV, QualType ResultType) {
2172   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2173     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2174 
2175   // Destructor thunks in the ARM ABI have indeterminate results.
2176   llvm::Type *T = CGF.ReturnValue.getElementType();
2177   RValue Undef = RValue::get(llvm::UndefValue::get(T));
2178   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2179 }
2180 
2181 /************************** Array allocation cookies **************************/
2182 
2183 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2184   // The array cookie is a size_t; pad that up to the element alignment.
2185   // The cookie is actually right-justified in that space.
2186   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2187                   CGM.getContext().getTypeAlignInChars(elementType));
2188 }
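// Illustrative example (assuming a 4-byte size_t and 8-byte-aligned double):
// for 'new double[n]' the cookie occupies 8 bytes and the element count is
// stored right-justified in the last 4 of them, immediately before the array
// data.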
2189 
2190 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2191                                              Address NewPtr,
2192                                              llvm::Value *NumElements,
2193                                              const CXXNewExpr *expr,
2194                                              QualType ElementType) {
2195   assert(requiresArrayCookie(expr));
2196 
2197   unsigned AS = NewPtr.getAddressSpace();
2198 
2199   ASTContext &Ctx = getContext();
2200   CharUnits SizeSize = CGF.getSizeSize();
2201 
2202   // The size of the cookie.
2203   CharUnits CookieSize =
2204     std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
2205   assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2206 
2207   // Compute an offset to the cookie.
2208   Address CookiePtr = NewPtr;
2209   CharUnits CookieOffset = CookieSize - SizeSize;
2210   if (!CookieOffset.isZero())
2211     CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2212 
2213   // Write the number of elements into the appropriate slot.
2214   Address NumElementsPtr =
2215       CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2216   llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2217 
2218   // Handle the array cookie specially in ASan.
2219   if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2220       (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2221        CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2222     // The store to the CookiePtr does not need to be instrumented.
2223     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2224     llvm::FunctionType *FTy =
2225         llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2226     llvm::FunctionCallee F =
2227         CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2228     CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2229   }
2230 
2231   // Finally, compute a pointer to the actual data buffer by skipping
2232   // over the cookie completely.
2233   return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2234 }
2235 
2236 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2237                                                 Address allocPtr,
2238                                                 CharUnits cookieSize) {
2239   // The number of elements is right-justified in the cookie.
2240   Address numElementsPtr = allocPtr;
2241   CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2242   if (!numElementsOffset.isZero())
2243     numElementsPtr =
2244       CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2245 
2246   unsigned AS = allocPtr.getAddressSpace();
2247   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2248   if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2249     return CGF.Builder.CreateLoad(numElementsPtr);
2250   // In asan mode emit a function call instead of a regular load and let the
2251   // run-time deal with it: if the shadow is properly poisoned return the
2252   // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2253   // We can't simply ignore this load using nosanitize metadata because
2254   // the metadata may be lost.
2255   llvm::FunctionType *FTy =
2256       llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2257   llvm::FunctionCallee F =
2258       CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2259   return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2260 }
2261 
2262 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2263   // ARM says that the cookie is always:
2264   //   struct array_cookie {
2265   //     std::size_t element_size; // element_size != 0
2266   //     std::size_t element_count;
2267   //   };
2268   // But the base ABI doesn't give anything an alignment greater than
2269   // 8, so we can dismiss this as typical ABI-author blindness to
2270   // actual language complexity and round up to the element alignment.
2271   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2272                   CGM.getContext().getTypeAlignInChars(elementType));
2273 }
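// Illustrative layout (sketch) for 'new T[n]' when T requires a cookie under
// the ARM rule above:
//   [ element_size ][ element_count ][ T[0] ... T[n-1] ]
// InitializeArrayCookie below writes the two size_t fields in that order.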
2274 
2275 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2276                                          Address newPtr,
2277                                          llvm::Value *numElements,
2278                                          const CXXNewExpr *expr,
2279                                          QualType elementType) {
2280   assert(requiresArrayCookie(expr));
2281 
2282   // The cookie is always at the start of the buffer.
2283   Address cookie = newPtr;
2284 
2285   // The first element is the element size.
2286   cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2287   llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2288                  getContext().getTypeSizeInChars(elementType).getQuantity());
2289   CGF.Builder.CreateStore(elementSize, cookie);
2290 
2291   // The second element is the element count.
2292   cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2293   CGF.Builder.CreateStore(numElements, cookie);
2294 
2295   // Finally, compute a pointer to the actual data buffer by skipping
2296   // over the cookie completely.
2297   CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2298   return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2299 }
2300 
2301 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2302                                             Address allocPtr,
2303                                             CharUnits cookieSize) {
2304   // The number of elements is at offset sizeof(size_t) relative to
2305   // the allocated pointer.
2306   Address numElementsPtr
2307     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2308 
2309   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2310   return CGF.Builder.CreateLoad(numElementsPtr);
2311 }
2312 
2313 /*********************** Static local initialization **************************/
2314 
2315 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2316                                               llvm::PointerType *GuardPtrTy) {
2317   // int __cxa_guard_acquire(__guard *guard_object);
2318   llvm::FunctionType *FTy =
2319     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2320                             GuardPtrTy, /*isVarArg=*/false);
2321   return CGM.CreateRuntimeFunction(
2322       FTy, "__cxa_guard_acquire",
2323       llvm::AttributeList::get(CGM.getLLVMContext(),
2324                                llvm::AttributeList::FunctionIndex,
2325                                llvm::Attribute::NoUnwind));
2326 }
2327 
2328 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2329                                               llvm::PointerType *GuardPtrTy) {
2330   // void __cxa_guard_release(__guard *guard_object);
2331   llvm::FunctionType *FTy =
2332     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2333   return CGM.CreateRuntimeFunction(
2334       FTy, "__cxa_guard_release",
2335       llvm::AttributeList::get(CGM.getLLVMContext(),
2336                                llvm::AttributeList::FunctionIndex,
2337                                llvm::Attribute::NoUnwind));
2338 }
2339 
2340 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2341                                             llvm::PointerType *GuardPtrTy) {
2342   // void __cxa_guard_abort(__guard *guard_object);
2343   llvm::FunctionType *FTy =
2344     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2345   return CGM.CreateRuntimeFunction(
2346       FTy, "__cxa_guard_abort",
2347       llvm::AttributeList::get(CGM.getLLVMContext(),
2348                                llvm::AttributeList::FunctionIndex,
2349                                llvm::Attribute::NoUnwind));
2350 }
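// Illustrative sketch of the thread-safe pattern EmitGuardedInit builds with
// these runtime entry points (pseudocode, not emitted IR):
//   if (initialization not yet complete) {
//     if (__cxa_guard_acquire(&guard)) {
//       ... run the initializer; on exception call __cxa_guard_abort(&guard) ...
//       __cxa_guard_release(&guard);
//     }
//   }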
2351 
2352 namespace {
2353   struct CallGuardAbort final : EHScopeStack::Cleanup {
2354     llvm::GlobalVariable *Guard;
2355     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2356 
2357     void Emit(CodeGenFunction &CGF, Flags flags) override {
2358       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2359                                   Guard);
2360     }
2361   };
2362 }
2363 
2364 /// The ARM code here follows the Itanium code closely enough that we
2365 /// just special-case it at particular places.
2366 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2367                                     const VarDecl &D,
2368                                     llvm::GlobalVariable *var,
2369                                     bool shouldPerformInit) {
2370   CGBuilderTy &Builder = CGF.Builder;
2371 
2372   // Inline variables that weren't instantiated from variable templates have
2373   // partially-ordered initialization within their translation unit.
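  // For example, a namespace-scope 'inline int counter = compute();' that is
  // not produced by a variable-template instantiation is such a variable:
  // every TU defining it must guard the initializer so it runs exactly once.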
2374   bool NonTemplateInline =
2375       D.isInline() &&
2376       !isTemplateInstantiation(D.getTemplateSpecializationKind());
2377 
2378   // We only need to use thread-safe statics for local non-TLS variables and
2379   // inline variables; other global initialization is always single-threaded
2380   // or (through lazy dynamic loading in multiple threads) unsequenced.
2381   bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2382                     (D.isLocalVarDecl() || NonTemplateInline) &&
2383                     !D.getTLSKind();
2384 
2385   // If we have a global variable with internal linkage and thread-safe statics
2386   // are disabled, we can just let the guard variable be of type i8.
2387   bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2388 
2389   llvm::IntegerType *guardTy;
2390   CharUnits guardAlignment;
2391   if (useInt8GuardVariable) {
2392     guardTy = CGF.Int8Ty;
2393     guardAlignment = CharUnits::One();
2394   } else {
2395     // Guard variables are 64 bits in the generic ABI and size width on ARM
2396     // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2397     if (UseARMGuardVarABI) {
2398       guardTy = CGF.SizeTy;
2399       guardAlignment = CGF.getSizeAlign();
2400     } else {
2401       guardTy = CGF.Int64Ty;
2402       guardAlignment = CharUnits::fromQuantity(
2403                              CGM.getDataLayout().getABITypeAlignment(guardTy));
2404     }
2405   }
2406   llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
2407                                     CGM.getTargetCodeGenInfo().getDefaultAS());
2408 
2409   // Create the guard variable if we don't already have it (as we
2410   // might if we're double-emitting this function body).
2411   llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2412   if (!guard) {
2413     // Mangle the name for the guard.
2414     SmallString<256> guardName;
2415     {
2416       llvm::raw_svector_ostream out(guardName);
2417       getMangleContext().mangleStaticGuardVariable(&D, out);
2418     }
2419 
2420     // Create the guard variable with a zero-initializer.
2421     // Just absorb linkage and visibility from the guarded variable.
2422     guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2423                                      false, var->getLinkage(),
2424                                      llvm::ConstantInt::get(guardTy, 0),
2425                                      guardName.str());
2426     guard->setDSOLocal(var->isDSOLocal());
2427     guard->setVisibility(var->getVisibility());
2428     // If the variable is thread-local, so is its guard variable.
2429     guard->setThreadLocalMode(var->getThreadLocalMode());
2430     guard->setAlignment(guardAlignment.getAsAlign());
2431 
2432     // The ABI says: "It is suggested that it be emitted in the same COMDAT
2433     // group as the associated data object." In practice, this doesn't work for
2434     // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2435     llvm::Comdat *C = var->getComdat();
2436     if (!D.isLocalVarDecl() && C &&
2437         (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2438          CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2439       guard->setComdat(C);
2440       // An inline variable's guard function is run from the per-TU
2441       // initialization function, not via a dedicated global ctor function, so
2442       // we can't put it in a comdat.
2443       if (!NonTemplateInline)
2444         CGF.CurFn->setComdat(C);
2445     } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2446       guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2447     }
2448 
2449     CGM.setStaticLocalDeclGuardAddress(&D, guard);
2450   }
2451 
2452   Address guardAddr = Address(guard, guardAlignment);
2453 
2454   // Test whether the variable has completed initialization.
2455   //
2456   // Itanium C++ ABI 3.3.2:
2457   //   The following is pseudo-code showing how these functions can be used:
2458   //     if (obj_guard.first_byte == 0) {
2459   //       if ( __cxa_guard_acquire (&obj_guard) ) {
2460   //         try {
2461   //           ... initialize the object ...;
2462   //         } catch (...) {
2463   //            __cxa_guard_abort (&obj_guard);
2464   //            throw;
2465   //         }
2466   //         ... queue object destructor with __cxa_atexit() ...;
2467   //         __cxa_guard_release (&obj_guard);
2468   //       }
2469   //     }
2470 
2471   // Load the first byte of the guard variable.
2472   llvm::LoadInst *LI =
2473       Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2474 
2475   // Itanium ABI:
2476   //   An implementation supporting thread-safety on multiprocessor
2477   //   systems must also guarantee that references to the initialized
2478   //   object do not occur before the load of the initialization flag.
2479   //
2480   // In LLVM, we do this by marking the load Acquire.
2481   if (threadsafe)
2482     LI->setAtomic(llvm::AtomicOrdering::Acquire);
2483 
2484   // For ARM, we should only check the first bit, rather than the entire byte:
2485   //
2486   // ARM C++ ABI 3.2.3.1:
2487   //   To support the potential use of initialization guard variables
2488   //   as semaphores that are the target of ARM SWP and LDREX/STREX
2489   //   synchronizing instructions we define a static initialization
2490   //   guard variable to be a 4-byte aligned, 4-byte word with the
2491   //   following inline access protocol.
2492   //     #define INITIALIZED 1
2493   //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
2494   //       if (__cxa_guard_acquire(&obj_guard))
2495   //         ...
2496   //     }
2497   //
2498   // and similarly for ARM64:
2499   //
2500   // ARM64 C++ ABI 3.2.2:
2501   //   This ABI instead only specifies the value bit 0 of the static guard
2502   //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
2503   //   variable is not initialized and 1 when it is.
2504   llvm::Value *V =
2505       (UseARMGuardVarABI && !useInt8GuardVariable)
2506           ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2507           : LI;
2508   llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2509 
2510   llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2511   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2512 
2513   // Check if the first byte of the guard variable is zero.
2514   CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2515                                CodeGenFunction::GuardKind::VariableGuard, &D);
2516 
2517   CGF.EmitBlock(InitCheckBlock);
2518 
2519   // Variables used when coping with thread-safe statics and exceptions.
2520   if (threadsafe) {
2521     // Call __cxa_guard_acquire.
2522     llvm::Value *V
2523       = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2524 
2525     llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2526 
2527     Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2528                          InitBlock, EndBlock);
2529 
2530     // Call __cxa_guard_abort along the exceptional edge.
2531     CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2532 
2533     CGF.EmitBlock(InitBlock);
2534   }
2535 
2536   // Emit the initializer and add a global destructor if appropriate.
2537   CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2538 
2539   if (threadsafe) {
2540     // Pop the guard-abort cleanup if we pushed one.
2541     CGF.PopCleanupBlock();
2542 
2543     // Call __cxa_guard_release.  This cannot throw.
2544     CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2545                                 guardAddr.getPointer());
2546   } else {
2547     Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2548   }
2549 
2550   CGF.EmitBlock(EndBlock);
2551 }
2552 
2553 /// Register a global destructor using __cxa_atexit.
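/// Conceptually this emits a call like '__cxa_atexit(dtor, addr, &__dso_handle)'
/// (or '__cxa_thread_atexit' / '_tlv_atexit' for TLS variables), so the runtime
/// later invokes 'dtor(addr)' at program or thread exit.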
2554 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2555                                         llvm::FunctionCallee dtor,
2556                                         llvm::Constant *addr, bool TLS) {
2557   assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2558          "__cxa_atexit is disabled");
2559   const char *Name = "__cxa_atexit";
2560   if (TLS) {
2561     const llvm::Triple &T = CGF.getTarget().getTriple();
2562     Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
2563   }
2564 
2565   // We're assuming that the destructor function is something we can
2566   // reasonably call with the default CC.  Go ahead and cast it to the
2567   // right prototype.
2568   unsigned AS = CGF.CGM.getTargetCodeGenInfo().getDefaultAS();
2569   llvm::Type *dtorTy =
2570     llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo(AS);
2571 
2572   // Preserve address space of addr.
2573   auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2574   auto AddrInt8PtrTy =
2575       AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2576 
2577   // Create a variable that binds the atexit to this shared object.
2578   llvm::Constant *handle =
2579       CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle", AS);
2580   auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2581   GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2582 
2583   // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2584   llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2585   llvm::FunctionType *atexitTy =
2586     llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2587 
2588   // Fetch the actual function.
2589   llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2590   if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2591     fn->setDoesNotThrow();
2592 
2593   // Convert the destructor pointer to a capability before passing it
2594   llvm::Value *dtorV = dtor.getCallee();
2595   auto &TI = CGF.getContext().getTargetInfo();
2596   if (TI.areAllPointersCapabilities()) {
2597     assert(dtorV->getType()->getPointerAddressSpace() ==
2598         CGF.CGM.getTargetCodeGenInfo().getCHERICapabilityAS());
2599   }
2600   dtorV = CGF.Builder.CreateBitCast(dtorV, dtorTy);
2601 
2602   if (!addr)
2603     // addr is null when we are trying to register a dtor annotated with
2604     // __attribute__((destructor)) in a constructor function. Using null here is
2605     // okay because this argument is just passed back to the destructor
2606     // function.
2607     addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2608 
2609   // FIXME-cheri-c++: should handle be a capability in the pure ABI?
2610   llvm::Value *args[] = {dtorV,
2611                          llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2612                          handle};
2613   CGF.EmitNounwindRuntimeCall(atexit, args);
2614 }
2615 
2616 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2617   for (const auto &I : DtorsUsingAtExit) {
2618     int Priority = I.first;
2619     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2620 
2621     // Create a function that registers destructors that have the same priority.
2622     //
2623     // Since constructor functions are run in non-descending order of their
2624     // priorities, destructors are registered in non-descending order of their
2625     // priorities, and since destructor functions are run in the reverse order
2626     // of their registration, destructor functions are run in non-ascending
2627     // order of their priorities.
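    // For example, destructors with priority 101 are registered before those
    // with priority 65535, so at exit the priority-65535 destructors run
    // first and the priority-101 destructors run last.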
2628     CodeGenFunction CGF(*this);
2629     std::string GlobalInitFnName =
2630         std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2631     llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
2632     llvm::Function *GlobalInitFn = CreateGlobalInitOrCleanUpFunction(
2633         FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
2634         SourceLocation());
2635     ASTContext &Ctx = getContext();
2636     QualType ReturnTy = Ctx.VoidTy;
2637     QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
2638     FunctionDecl *FD = FunctionDecl::Create(
2639         Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
2640         &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
2641         false, false);
2642     CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
2643                       getTypes().arrangeNullaryFunction(), FunctionArgList(),
2644                       SourceLocation(), SourceLocation());
2645 
2646     for (auto *Dtor : Dtors) {
2647       // Register the destructor function calling __cxa_atexit if it is
2648       // available. Otherwise fall back on calling atexit.
2649       if (getCodeGenOpts().CXAAtExit)
2650         emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2651       else
2652         CGF.registerGlobalDtorWithAtExit(Dtor);
2653     }
2654 
2655     CGF.FinishFunction();
2656     AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2657   }
2658 }
2659 
2660 /// Register a global destructor as best as we know how.
2661 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2662                                        llvm::FunctionCallee dtor,
2663                                        llvm::Constant *addr) {
2664   if (D.isNoDestroy(CGM.getContext()))
2665     return;
2666 
2667   // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2668   // or __cxa_atexit depending on whether this VarDecl uses thread-local storage
2669   // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2670   // We can always use __cxa_thread_atexit.
2671   if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2672     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2673 
2674   // In Apple kexts, we want to add a global destructor entry.
2675   // FIXME: shouldn't this be guarded by some variable?
2676   if (CGM.getLangOpts().AppleKext) {
2677     // Generate a global destructor entry.
2678     return CGM.AddCXXDtorEntry(dtor, addr);
2679   }
2680 
2681   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2682 }
2683 
2684 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2685                                        CodeGen::CodeGenModule &CGM) {
2686   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2687   // Darwin prefers to have references to thread local variables to go through
2688   // the thread wrapper instead of directly referencing the backing variable.
2689   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2690          CGM.getTarget().getTriple().isOSDarwin();
2691 }
2692 
2693 /// Get the appropriate linkage for the wrapper function. This is essentially
2694 /// the weak form of the variable's linkage; every translation unit which needs
2695 /// the wrapper emits a copy, and we want the linker to merge them.
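/// For example, several TUs that all reference an external 'thread_local int x;'
/// each emit the wrapper '_ZTW1x' with weak_odr linkage, and the linker keeps a
/// single copy.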
2696 static llvm::GlobalValue::LinkageTypes
2697 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2698   llvm::GlobalValue::LinkageTypes VarLinkage =
2699       CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2700 
2701   // For internal linkage variables, we don't need an external or weak wrapper.
2702   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2703     return VarLinkage;
2704 
2705   // If the thread wrapper is replaceable, give it appropriate linkage.
2706   if (isThreadWrapperReplaceable(VD, CGM))
2707     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2708         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2709       return VarLinkage;
2710   return llvm::GlobalValue::WeakODRLinkage;
2711 }
2712 
2713 llvm::Function *
2714 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2715                                              llvm::Value *Val) {
2716   // Mangle the name for the thread_local wrapper function.
2717   SmallString<256> WrapperName;
2718   {
2719     llvm::raw_svector_ostream Out(WrapperName);
2720     getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2721   }
2722 
2723   // FIXME: If VD is a definition, we should regenerate the function attributes
2724   // before returning.
2725   if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2726     return cast<llvm::Function>(V);
2727 
2728   QualType RetQT = VD->getType();
2729   if (RetQT->isReferenceType())
2730     RetQT = RetQT.getNonReferenceType();
2731 
2732   const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2733       getContext().getPointerType(RetQT), FunctionArgList());
2734 
2735   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2736   llvm::Function *Wrapper =
2737       llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2738                              WrapperName.str(), &CGM.getModule());
2739 
2740   if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2741     Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2742 
2743   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);
2744 
2745   // Always resolve references to the wrapper at link time.
2746   if (!Wrapper->hasLocalLinkage())
2747     if (!isThreadWrapperReplaceable(VD, CGM) ||
2748         llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2749         llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2750         VD->getVisibility() == HiddenVisibility)
2751       Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2752 
2753   if (isThreadWrapperReplaceable(VD, CGM)) {
2754     Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2755     Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2756   }
2757 
2758   ThreadWrappers.push_back({VD, Wrapper});
2759   return Wrapper;
2760 }
2761 
2762 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2763     CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2764     ArrayRef<llvm::Function *> CXXThreadLocalInits,
2765     ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2766   llvm::Function *InitFunc = nullptr;
2767 
2768   // Separate initializers into those with ordered (or partially-ordered)
2769   // initialization and those with unordered initialization.
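  // For example, an ordinary 'thread_local X obj;' at namespace scope has
  // ordered initialization within its TU, while a thread_local coming from a
  // variable-template (or class-template static member) instantiation is
  // unordered and gets its own standalone initializer.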
2770   llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2771   llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2772   for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2773     if (isTemplateInstantiation(
2774             CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2775       UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2776           CXXThreadLocalInits[I];
2777     else
2778       OrderedInits.push_back(CXXThreadLocalInits[I]);
2779   }
2780 
2781   if (!OrderedInits.empty()) {
2782     // Generate a guarded initialization function.
2783     llvm::FunctionType *FTy =
2784         llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2785     const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2786     InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
2787                                                      SourceLocation(),
2788                                                      /*TLS=*/true);
2789     llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2790         CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2791         llvm::GlobalVariable::InternalLinkage,
2792         llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard", nullptr,
2793         // Need to set address space and initially mark as not thread-local
2794         // since that flag is set later.
2795         llvm::GlobalVariable::NotThreadLocal,
2796         CGM.getTargetCodeGenInfo().getTlsAddressSpace());
2797     Guard->setThreadLocal(true);
2798     Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
2799 
2800     CharUnits GuardAlign = CharUnits::One();
2801     Guard->setAlignment(GuardAlign.getAsAlign());
2802 
2803     CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2804         InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
2805     // On Darwin platforms, use CXX_FAST_TLS calling convention.
2806     if (CGM.getTarget().getTriple().isOSDarwin()) {
2807       InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2808       InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2809     }
2810   }
2811 
2812   // Create declarations for thread wrappers for all thread-local variables
2813   // with non-discardable definitions in this translation unit.
2814   for (const VarDecl *VD : CXXThreadLocals) {
2815     if (VD->hasDefinition() &&
2816         !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2817       llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2818       getOrCreateThreadLocalWrapper(VD, GV);
2819     }
2820   }
2821 
2822   // Emit all referenced thread wrappers.
2823   for (auto VDAndWrapper : ThreadWrappers) {
2824     const VarDecl *VD = VDAndWrapper.first;
2825     llvm::GlobalVariable *Var =
2826         cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2827     llvm::Function *Wrapper = VDAndWrapper.second;
2828 
2829     // Some targets require that all access to thread local variables go through
2830     // the thread wrapper.  This means that we cannot attempt to create a thread
2831     // wrapper or a thread helper.
2832     if (!VD->hasDefinition()) {
2833       if (isThreadWrapperReplaceable(VD, CGM)) {
2834         Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2835         continue;
2836       }
2837 
2838       // If this isn't a TU in which this variable is defined, the thread
2839       // wrapper is discardable.
2840       if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2841         Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
2842     }
2843 
2844     CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2845 
2846     // Mangle the name for the thread_local initialization function.
2847     SmallString<256> InitFnName;
2848     {
2849       llvm::raw_svector_ostream Out(InitFnName);
2850       getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2851     }
2852 
2853     llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2854 
2855     // If we have a definition for the variable, emit the initialization
2856     // function as an alias to the global Init function (if any). Otherwise,
2857     // produce a declaration of the initialization function.
2858     llvm::GlobalValue *Init = nullptr;
2859     bool InitIsInitFunc = false;
2860     bool HasConstantInitialization = false;
2861     if (!usesThreadWrapperFunction(VD)) {
2862       HasConstantInitialization = true;
2863     } else if (VD->hasDefinition()) {
2864       InitIsInitFunc = true;
2865       llvm::Function *InitFuncToUse = InitFunc;
2866       if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2867         InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2868       if (InitFuncToUse)
2869         Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2870                                          InitFuncToUse);
2871     } else {
2872       // Emit a weak global function referring to the initialization function.
2873       // This function will not exist if the TU defining the thread_local
2874       // variable in question does not need any dynamic initialization for
2875       // its thread_local variables.
2876       Init = llvm::Function::Create(InitFnTy,
2877                                     llvm::GlobalVariable::ExternalWeakLinkage,
2878                                     InitFnName.str(), &CGM.getModule());
2879       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2880       CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2881                                     cast<llvm::Function>(Init));
2882     }
2883 
2884     if (Init) {
2885       Init->setVisibility(Var->getVisibility());
2886       // Don't mark an extern_weak function DSO local on windows.
2887       if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2888         Init->setDSOLocal(Var->isDSOLocal());
2889     }
2890 
2891     llvm::LLVMContext &Context = CGM.getModule().getContext();
2892     llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2893     CGBuilderTy Builder(CGM, Entry);
2894     if (HasConstantInitialization) {
2895       // No dynamic initialization to invoke.
2896     } else if (InitIsInitFunc) {
2897       if (Init) {
2898         llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
2899         if (isThreadWrapperReplaceable(VD, CGM)) {
2900           CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2901           llvm::Function *Fn =
2902               cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2903           Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2904         }
2905       }
2906     } else {
2907       // Don't know whether we have an init function. Call it if it exists.
2908       llvm::Value *Have = Builder.CreateIsNotNull(Init);
2909       llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2910       llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2911       Builder.CreateCondBr(Have, InitBB, ExitBB);
2912 
2913       Builder.SetInsertPoint(InitBB);
2914       Builder.CreateCall(InitFnTy, Init);
2915       Builder.CreateBr(ExitBB);
2916 
2917       Builder.SetInsertPoint(ExitBB);
2918     }
2919 
2920     // For a reference, the result of the wrapper function is a pointer to
2921     // the referenced object.
2922     llvm::Value *Val = Var;
2923     if (VD->getType()->isReferenceType()) {
2924       CharUnits Align = CGM.getContext().getDeclAlign(VD);
2925       Val = Builder.CreateAlignedLoad(Val, Align);
2926     }
2927     if (Val->getType() != Wrapper->getReturnType())
2928       Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2929           Val, Wrapper->getReturnType(), "");
2930     Builder.CreateRet(Val);
2931   }
2932 }
2933 
2934 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2935                                                    const VarDecl *VD,
2936                                                    QualType LValType) {
2937   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2938   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2939 
2940   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2941   CallVal->setCallingConv(Wrapper->getCallingConv());
2942 
2943   LValue LV;
2944   if (VD->getType()->isReferenceType())
2945     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2946   else
2947     LV = CGF.MakeAddrLValue(CallVal, LValType,
2948                             CGF.getContext().getDeclAlign(VD));
2949   // FIXME: need setObjCGCLValueClass?
2950   return LV;
2951 }
2952 
2953 /// Return whether the given global decl needs a VTT parameter, which it does
2954 /// if it's a base constructor or destructor with virtual bases.
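/// For example, given 'struct A { }; struct B : virtual A { B(); };', the
/// base-object constructor of B needs a VTT argument so it can locate the
/// correct vtables (and the virtual base A) when B is constructed as a base
/// subobject of a further-derived class.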
2955 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2956   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2957 
2958   // If the class has no virtual bases, just return early.
2959   if (!MD->getParent()->getNumVBases())
2960     return false;
2961 
2962   // Check if we have a base constructor.
2963   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2964     return true;
2965 
2966   // Check if we have a base destructor.
2967   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2968     return true;
2969 
2970   return false;
2971 }
2972 
2973 namespace {
2974 class ItaniumRTTIBuilder {
2975   CodeGenModule &CGM;  // Per-module state.
2976   llvm::LLVMContext &VMContext;
2977   const ItaniumCXXABI &CXXABI;  // Per-module state.
2978 
2979   /// Fields - The fields of the RTTI descriptor currently being built.
2980   SmallVector<llvm::Constant *, 16> Fields;
2981 
2982   /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2983   llvm::GlobalVariable *
2984   GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2985 
2986   /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2987   /// descriptor of the given type.
2988   llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2989 
2990   /// BuildVTablePointer - Build the vtable pointer for the given type.
2991   void BuildVTablePointer(const Type *Ty);
2992 
2993   /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2994   /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2995   void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2996 
2997   /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2998   /// classes with bases that do not satisfy the abi::__si_class_type_info
2999   /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3000   void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3001 
3002   /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3003   /// for pointer types.
3004   void BuildPointerTypeInfo(QualType PointeeTy);
3005 
3006   /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3007   /// type_info for an object type.
3008   void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3009 
3010   /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3011   /// struct, used for member pointer types.
3012   void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3013 
3014 public:
3015   ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3016       : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3017 
3018   // Pointer type info flags.
3019   enum {
3020     /// PTI_Const - Type has const qualifier.
3021     PTI_Const = 0x1,
3022 
3023     /// PTI_Volatile - Type has volatile qualifier.
3024     PTI_Volatile = 0x2,
3025 
3026     /// PTI_Restrict - Type has restrict qualifier.
3027     PTI_Restrict = 0x4,
3028 
3029     /// PTI_Incomplete - Type is incomplete.
3030     PTI_Incomplete = 0x8,
3031 
3032     /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3033     /// (in pointer to member).
3034     PTI_ContainingClassIncomplete = 0x10,
3035 
3036     /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3037     //PTI_TransactionSafe = 0x20,
3038 
3039     /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3040     PTI_Noexcept = 0x40,
3041   };
3042 
3043   // VMI type info flags.
3044   enum {
3045     /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3046     VMI_NonDiamondRepeat = 0x1,
3047 
3048     /// VMI_DiamondShaped - Class is diamond shaped.
3049     VMI_DiamondShaped = 0x2
3050   };
3051 
3052   // Base class type info flags.
3053   enum {
3054     /// BCTI_Virtual - Base class is virtual.
3055     BCTI_Virtual = 0x1,
3056 
3057     /// BCTI_Public - Base class is public.
3058     BCTI_Public = 0x2
3059   };
3060 
3061   /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3062   /// link to an existing RTTI descriptor if one already exists.
3063   llvm::Constant *BuildTypeInfo(QualType Ty);
3064 
3065   /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3066   llvm::Constant *BuildTypeInfo(
3067       QualType Ty,
3068       llvm::GlobalVariable::LinkageTypes Linkage,
3069       llvm::GlobalValue::VisibilityTypes Visibility,
3070       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3071 };
3072 }
3073 
3074 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3075     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3076   SmallString<256> Name;
3077   llvm::raw_svector_ostream Out(Name);
3078   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3079 
3080   // We know that the mangled name of the type starts at index 4 of the
3081   // mangled name of the typename, so we can just index into it in order to
3082   // get the mangled name of the type.
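  // For example, the type_info name symbol for 'int' is "_ZTSi"; dropping the
  // 4-character "_ZTS" prefix leaves "i", the mangling of the type itself.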
3083   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3084                                                             Name.substr(4));
3085   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3086 
3087   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3088       Name, Init->getType(), Linkage, Align.getQuantity());
3089 
3090   GV->setInitializer(Init);
3091 
3092   return GV;
3093 }
3094 
3095 llvm::Constant *
3096 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3097   // Mangle the RTTI name.
3098   SmallString<256> Name;
3099   llvm::raw_svector_ostream Out(Name);
3100   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3101 
3102   // Look for an existing global.
3103   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3104 
3105   if (!GV) {
3106     // Create a new global variable.
3107     // Note for the future: If we would ever like to do deferred emission of
3108     // RTTI, check if emitting vtables opportunistically needs any adjustment.
3109 
3110     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3111                                   /*isConstant=*/true,
3112                                   llvm::GlobalValue::ExternalLinkage, nullptr,
3113                                   Name);
3114     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3115     CGM.setGVProperties(GV, RD);
3116   }
3117 
3118   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3119 }
3120 
3121 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3122 /// info for that type is defined in the standard library.
3123 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3124   // Itanium C++ ABI 2.9.2:
3125   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
3126   //   the run-time support library. Specifically, the run-time support
3127   //   library should contain type_info objects for the types X, X* and
3128   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3129   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
3130   //   long, unsigned long, long long, unsigned long long, float, double,
3131   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
3132   //   half-precision floating point types.
3133   //
3134   // GCC also emits RTTI for __int128.
3135   // FIXME: We do not emit RTTI information for decimal types here.
3136 
3137   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3138   switch (Ty->getKind()) {
3139     case BuiltinType::Void:
3140     case BuiltinType::NullPtr:
3141     case BuiltinType::Bool:
3142     case BuiltinType::WChar_S:
3143     case BuiltinType::WChar_U:
3144     case BuiltinType::Char_U:
3145     case BuiltinType::Char_S:
3146     case BuiltinType::UChar:
3147     case BuiltinType::SChar:
3148     case BuiltinType::Short:
3149     case BuiltinType::UShort:
3150     case BuiltinType::Int:
3151     case BuiltinType::UInt:
3152     case BuiltinType::Long:
3153     case BuiltinType::ULong:
3154     case BuiltinType::LongLong:
3155     case BuiltinType::ULongLong:
3156     case BuiltinType::Half:
3157     case BuiltinType::Float:
3158     case BuiltinType::Double:
3159     case BuiltinType::LongDouble:
3160     case BuiltinType::Float16:
3161     case BuiltinType::Float128:
3162     case BuiltinType::Char8:
3163     case BuiltinType::Char16:
3164     case BuiltinType::Char32:
3165     case BuiltinType::Int128:
3166     case BuiltinType::UInt128:
3167       return true;
3168 
3169 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3170     case BuiltinType::Id:
3171 #include "clang/Basic/OpenCLImageTypes.def"
3172 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3173     case BuiltinType::Id:
3174 #include "clang/Basic/OpenCLExtensionTypes.def"
3175     case BuiltinType::OCLSampler:
3176     case BuiltinType::OCLEvent:
3177     case BuiltinType::OCLClkEvent:
3178     case BuiltinType::OCLQueue:
3179     case BuiltinType::OCLReserveID:
3180 #define SVE_TYPE(Name, Id, SingletonId) \
3181     case BuiltinType::Id:
3182 #include "clang/Basic/AArch64SVEACLETypes.def"
3183     case BuiltinType::ShortAccum:
3184     case BuiltinType::Accum:
3185     case BuiltinType::LongAccum:
3186     case BuiltinType::UShortAccum:
3187     case BuiltinType::UAccum:
3188     case BuiltinType::ULongAccum:
3189     case BuiltinType::ShortFract:
3190     case BuiltinType::Fract:
3191     case BuiltinType::LongFract:
3192     case BuiltinType::UShortFract:
3193     case BuiltinType::UFract:
3194     case BuiltinType::ULongFract:
3195     case BuiltinType::SatShortAccum:
3196     case BuiltinType::SatAccum:
3197     case BuiltinType::SatLongAccum:
3198     case BuiltinType::SatUShortAccum:
3199     case BuiltinType::SatUAccum:
3200     case BuiltinType::SatULongAccum:
3201     case BuiltinType::SatShortFract:
3202     case BuiltinType::SatFract:
3203     case BuiltinType::SatLongFract:
3204     case BuiltinType::SatUShortFract:
3205     case BuiltinType::SatUFract:
3206     case BuiltinType::SatULongFract:
3207     case BuiltinType::BFloat16:
3208       return false;
3209 
3210     case BuiltinType::Dependent:
3211 #define BUILTIN_TYPE(Id, SingletonId)
3212 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3213     case BuiltinType::Id:
3214 #include "clang/AST/BuiltinTypes.def"
3215       llvm_unreachable("asking for RRTI for a placeholder type!");
3216 
3217     case BuiltinType::ObjCId:
3218     case BuiltinType::ObjCClass:
3219     case BuiltinType::ObjCSel:
3220       llvm_unreachable("FIXME: Objective-C types are unsupported!");
3221     case BuiltinType::UIntCap:
3222     case BuiltinType::IntCap:
3223       llvm_unreachable("Capability types not supported in RTTI yet!");
3224   }
3225 
3226   llvm_unreachable("Invalid BuiltinType Kind!");
3227 }
3228 
3229 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3230   QualType PointeeTy = PointerTy->getPointeeType();
3231   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3232   if (!BuiltinTy)
3233     return false;
3234 
3235   // Check the qualifiers.
3236   Qualifiers Quals = PointeeTy.getQualifiers();
3237   Quals.removeConst();
3238 
3239   if (!Quals.empty())
3240     return false;
3241 
3242   return TypeInfoIsInStandardLibrary(BuiltinTy);
3243 }
3244 
3245 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3246 /// information for the given type exists in the standard library.
3247 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3248   // Type info for builtin types is defined in the standard library.
3249   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3250     return TypeInfoIsInStandardLibrary(BuiltinTy);
3251 
3252   // Type info for some pointer types to builtin types is defined in the
3253   // standard library.
3254   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3255     return TypeInfoIsInStandardLibrary(PointerTy);
3256 
3257   return false;
3258 }
3259 
3260 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3261 /// the given type exists somewhere else, and that we should not emit the type
3262 /// information in this translation unit.  Assumes that it is not a
3263 /// standard-library type.
3264 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3265                                             QualType Ty) {
3266   ASTContext &Context = CGM.getContext();
3267 
3268   // If RTTI is disabled, assume it might be disabled in the
3269   // translation unit that defines any potential key function, too.
3270   if (!Context.getLangOpts().RTTI) return false;
3271 
3272   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3273     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3274     if (!RD->hasDefinition())
3275       return false;
3276 
3277     if (!RD->isDynamicClass())
3278       return false;
3279 
3280     // FIXME: this may need to be reconsidered if the key function
3281     // changes.
3282     // N.B. We must always emit the RTTI data ourselves if there exists a key
3283     // function.
3284     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3285 
3286     // Don't import the RTTI but emit it locally.
3287     if (CGM.getTriple().isWindowsGNUEnvironment())
3288       return false;
3289 
3290     if (CGM.getVTables().isVTableExternal(RD))
3291       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3292                  ? false
3293                  : true;
3294 
3295     if (IsDLLImport)
3296       return true;
3297   }
3298 
3299   return false;
3300 }
3301 
3302 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3303 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3304   return !RecordTy->getDecl()->isCompleteDefinition();
3305 }
3306 
3307 /// ContainsIncompleteClassType - Returns whether the given type contains an
3308 /// incomplete class type. This is true if
3309 ///
3310 ///   * The given type is an incomplete class type.
3311 ///   * The given type is a pointer type whose pointee type contains an
3312 ///     incomplete class type.
3313 ///   * The given type is a member pointer type whose class is an incomplete
3314 ///     class type.
3315   ///   * The given type is a member pointer type whose pointee type contains an
3316 ///     incomplete class type.
3317   /// i.e. it is an indirect or direct pointer to an incomplete class type.
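/// For example, with only a forward declaration 'class Incomplete;' visible,
/// the types 'Incomplete *', 'Incomplete **' and 'int Incomplete::*' all
/// contain an incomplete class type.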
3318 static bool ContainsIncompleteClassType(QualType Ty) {
3319   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3320     if (IsIncompleteClassType(RecordTy))
3321       return true;
3322   }
3323 
3324   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3325     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3326 
3327   if (const MemberPointerType *MemberPointerTy =
3328       dyn_cast<MemberPointerType>(Ty)) {
3329     // Check if the class type is incomplete.
3330     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3331     if (IsIncompleteClassType(ClassType))
3332       return true;
3333 
3334     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3335   }
3336 
3337   return false;
3338 }
3339 
3340 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3341 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3342 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3343 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3344   // Check the number of bases.
3345   if (RD->getNumBases() != 1)
3346     return false;
3347 
3348   // Get the base.
3349   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3350 
3351   // Check that the base is not virtual.
3352   if (Base->isVirtual())
3353     return false;
3354 
3355   // Check that the base is public.
3356   if (Base->getAccessSpecifier() != AS_public)
3357     return false;
3358 
3359   // Check that the class is dynamic iff the base is.
3360   auto *BaseDecl =
3361       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3362   if (!BaseDecl->isEmpty() &&
3363       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3364     return false;
3365 
3366   return true;
3367 }
3368 
3369 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3370   // abi::__class_type_info.
3371   static const char * const ClassTypeInfo =
3372     "_ZTVN10__cxxabiv117__class_type_infoE";
3373   // abi::__si_class_type_info.
3374   static const char * const SIClassTypeInfo =
3375     "_ZTVN10__cxxabiv120__si_class_type_infoE";
3376   // abi::__vmi_class_type_info.
3377   static const char * const VMIClassTypeInfo =
3378     "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3379 
3380   const char *VTableName = nullptr;
3381 
3382   switch (Ty->getTypeClass()) {
3383 #define TYPE(Class, Base)
3384 #define ABSTRACT_TYPE(Class, Base)
3385 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3386 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3387 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3388 #include "clang/AST/TypeNodes.inc"
3389     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3390 
3391   case Type::LValueReference:
3392   case Type::RValueReference:
3393     llvm_unreachable("References shouldn't get here");
3394 
3395   case Type::Auto:
3396   case Type::DeducedTemplateSpecialization:
3397     llvm_unreachable("Undeduced type shouldn't get here");
3398 
3399   case Type::Pipe:
3400     llvm_unreachable("Pipe types shouldn't get here");
3401 
3402   case Type::Builtin:
3403   case Type::ExtInt:
3404   // GCC treats vector and complex types as fundamental types.
3405   case Type::Vector:
3406   case Type::ExtVector:
3407   case Type::ConstantMatrix:
3408   case Type::Complex:
3409   case Type::Atomic:
3410   // FIXME: GCC treats block pointers as fundamental types?!
3411   case Type::BlockPointer:
3412     // abi::__fundamental_type_info.
3413     VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3414     break;
3415 
3416   case Type::ConstantArray:
3417   case Type::IncompleteArray:
3418   case Type::VariableArray:
3419     // abi::__array_type_info.
3420     VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3421     break;
3422 
3423   case Type::FunctionNoProto:
3424   case Type::FunctionProto:
3425     // abi::__function_type_info.
3426     VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3427     break;
3428 
3429   case Type::Enum:
3430     // abi::__enum_type_info.
3431     VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3432     break;
3433 
3434   case Type::Record: {
3435     const CXXRecordDecl *RD =
3436       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3437 
3438     if (!RD->hasDefinition() || !RD->getNumBases()) {
3439       VTableName = ClassTypeInfo;
3440     } else if (CanUseSingleInheritance(RD)) {
3441       VTableName = SIClassTypeInfo;
3442     } else {
3443       VTableName = VMIClassTypeInfo;
3444     }
3445 
3446     break;
3447   }
3448 
3449   case Type::ObjCObject:
3450     // Ignore protocol qualifiers.
3451     Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3452 
3453     // Handle id and Class.
3454     if (isa<BuiltinType>(Ty)) {
3455       VTableName = ClassTypeInfo;
3456       break;
3457     }
3458 
3459     assert(isa<ObjCInterfaceType>(Ty));
3460     LLVM_FALLTHROUGH;
3461 
3462   case Type::ObjCInterface:
3463     if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3464       VTableName = SIClassTypeInfo;
3465     } else {
3466       VTableName = ClassTypeInfo;
3467     }
3468     break;
3469 
3470   case Type::ObjCObjectPointer:
3471   case Type::Pointer:
3472     // abi::__pointer_type_info.
3473     VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3474     break;
3475 
3476   case Type::MemberPointer:
3477     // abi::__pointer_to_member_type_info.
3478     VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3479     break;
3480   }
3481 
3482   llvm::Constant *VTable = nullptr;
3483 
3484   // Check if the alias exists. If it doesn't, then get or create the global.
3485   if (CGM.getItaniumVTableContext().isRelativeLayout())
3486     VTable = CGM.getModule().getNamedAlias(VTableName);
3487   if (!VTable)
3488     VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3489 
3490   CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3491 
3492   llvm::Type *PtrDiffTy =
3493       CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3494 
3495   // The vtable address point is 2.
3496   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3497     // The vtable address point is 8 bytes after its start:
3498     // 4 for the offset to top + 4 for the relative offset to rtti.
3499     llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3500     VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3501     VTable =
3502         llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3503   } else {
3504     llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3505     VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
3506                                                           Two);
3507   }
3508   VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3509 
3510   Fields.push_back(VTable);
3511 }
3512 
3513 /// Return the linkage that the type info and type info name constants
3514 /// should have for the given type.
3515 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3516                                                              QualType Ty) {
3517   // Itanium C++ ABI 2.9.5p7:
3518   //   In addition, it and all of the intermediate abi::__pointer_type_info
3519   //   structs in the chain down to the abi::__class_type_info for the
3520   //   incomplete class type must be prevented from resolving to the
3521   //   corresponding type_info structs for the complete class type, possibly
3522   //   by making them local static objects. Finally, a dummy class RTTI is
3523   //   generated for the incomplete type that will not resolve to the final
3524   //   complete class RTTI (because the latter need not exist), possibly by
3525   //   making it a local static object.
3526   if (ContainsIncompleteClassType(Ty))
3527     return llvm::GlobalValue::InternalLinkage;
3528 
3529   switch (Ty->getLinkage()) {
3530   case NoLinkage:
3531   case InternalLinkage:
3532   case UniqueExternalLinkage:
3533     return llvm::GlobalValue::InternalLinkage;
3534 
3535   case VisibleNoLinkage:
3536   case ModuleInternalLinkage:
3537   case ModuleLinkage:
3538   case ExternalLinkage:
3539     // RTTI is not enabled, which means that this type info struct is going
3540     // to be used for exception handling. Give it linkonce_odr linkage.
3541     if (!CGM.getLangOpts().RTTI)
3542       return llvm::GlobalValue::LinkOnceODRLinkage;
3543 
3544     if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3545       const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3546       if (RD->hasAttr<WeakAttr>())
3547         return llvm::GlobalValue::WeakODRLinkage;
3548       if (CGM.getTriple().isWindowsItaniumEnvironment())
3549         if (RD->hasAttr<DLLImportAttr>() &&
3550             ShouldUseExternalRTTIDescriptor(CGM, Ty))
3551           return llvm::GlobalValue::ExternalLinkage;
3552       // MinGW always uses LinkOnceODRLinkage for type info.
3553       if (RD->isDynamicClass() &&
3554           !CGM.getContext()
3555                .getTargetInfo()
3556                .getTriple()
3557                .isWindowsGNUEnvironment())
3558         return CGM.getVTableLinkage(RD);
3559     }
3560 
3561     return llvm::GlobalValue::LinkOnceODRLinkage;
3562   }
3563 
3564   llvm_unreachable("Invalid linkage!");
3565 }
3566 
3567 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3568   // We want to operate on the canonical type.
3569   Ty = Ty.getCanonicalType();
3570 
3571   // Check if we've already emitted an RTTI descriptor for this type.
3572   SmallString<256> Name;
3573   llvm::raw_svector_ostream Out(Name);
3574   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3575 
3576   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3577   if (OldGV && !OldGV->isDeclaration()) {
3578     assert(!OldGV->hasAvailableExternallyLinkage() &&
3579            "available_externally typeinfos not yet implemented");
3580 
3581     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3582   }
3583 
3584   // Check if there is already an external RTTI descriptor for this type.
3585   if (IsStandardLibraryRTTIDescriptor(Ty) ||
3586       ShouldUseExternalRTTIDescriptor(CGM, Ty))
3587     return GetAddrOfExternalRTTIDescriptor(Ty);
3588 
3589   // Compute the linkage to use for the type_info object and its name.
3590   llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3591 
3592   // Give the type_info object and name the formal visibility of the
3593   // type itself.
3594   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3595   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3596     // If the linkage is local, only default visibility makes sense.
3597     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3598   else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3599            ItaniumCXXABI::RUK_NonUniqueHidden)
3600     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3601   else
3602     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3603 
3604   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3605       llvm::GlobalValue::DefaultStorageClass;
3606   if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3607     auto RD = Ty->getAsCXXRecordDecl();
3608     if (RD && RD->hasAttr<DLLExportAttr>())
3609       DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3610   }
3611 
3612   return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3613 }
3614 
3615 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3616       QualType Ty,
3617       llvm::GlobalVariable::LinkageTypes Linkage,
3618       llvm::GlobalValue::VisibilityTypes Visibility,
3619       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3620   // Add the vtable pointer.
3621   BuildVTablePointer(cast<Type>(Ty));
3622 
3623   // And the name.
3624   llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3625   llvm::Constant *TypeNameField;
3626 
3627   // If we're supposed to demote the visibility, be sure to set a flag
3628   // to use a string comparison for type_info comparisons.
3629   ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3630       CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3631   if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3632     // The flag is the sign bit, which on ARM64 is defined to be clear
3633     // for global pointers.  This is very ARM64-specific.
3634     TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3635     llvm::Constant *flag =
3636         llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3637     TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3638     TypeNameField =
3639         CGM.getContext().getTargetInfo().areAllPointersCapabilities()
3640             ? CGM.getNullDerivedConstantCapability(CGM.Int8PtrTy, TypeNameField)
3641             : llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3642   } else {
3643     TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3644   }
3645   Fields.push_back(TypeNameField);
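  // A minimal sketch of how a runtime consumes that sign-bit flag when
  // comparing type_info objects (this mirrors libc++abi's behavior on ARM64;
  // the helper name and masking below are illustrative assumptions):
  //
  //   bool namesEqual(uintptr_t LHSName, uintptr_t RHSName) {
  //     if (LHSName == RHSName)
  //       return true;                      // unique RTTI: pointer equality
  //     if ((intptr_t)LHSName < 0 || (intptr_t)RHSName < 0)
  //       // Non-unique bit set: fall back to comparing the name strings.
  //       return strcmp((const char *)(LHSName & ~(1ULL << 63)),
  //                     (const char *)(RHSName & ~(1ULL << 63))) == 0;
  //     return false;
  //   }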
3646 
3647   switch (Ty->getTypeClass()) {
3648 #define TYPE(Class, Base)
3649 #define ABSTRACT_TYPE(Class, Base)
3650 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3651 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3652 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3653 #include "clang/AST/TypeNodes.inc"
3654     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3655 
3656   // GCC treats vector types as fundamental types.
3657   case Type::Builtin:
3658   case Type::Vector:
3659   case Type::ExtVector:
3660   case Type::ConstantMatrix:
3661   case Type::Complex:
3662   case Type::BlockPointer:
3663     // Itanium C++ ABI 2.9.5p4:
3664     // abi::__fundamental_type_info adds no data members to std::type_info.
3665     break;
3666 
3667   case Type::LValueReference:
3668   case Type::RValueReference:
3669     llvm_unreachable("References shouldn't get here");
3670 
3671   case Type::Auto:
3672   case Type::DeducedTemplateSpecialization:
3673     llvm_unreachable("Undeduced type shouldn't get here");
3674 
3675   case Type::Pipe:
3676     break;
3677 
3678   case Type::ExtInt:
3679     break;
3680 
3681   case Type::ConstantArray:
3682   case Type::IncompleteArray:
3683   case Type::VariableArray:
3684     // Itanium C++ ABI 2.9.5p5:
3685     // abi::__array_type_info adds no data members to std::type_info.
3686     break;
3687 
3688   case Type::FunctionNoProto:
3689   case Type::FunctionProto:
3690     // Itanium C++ ABI 2.9.5p5:
3691     // abi::__function_type_info adds no data members to std::type_info.
3692     break;
3693 
3694   case Type::Enum:
3695     // Itanium C++ ABI 2.9.5p5:
3696     // abi::__enum_type_info adds no data members to std::type_info.
3697     break;
3698 
3699   case Type::Record: {
3700     const CXXRecordDecl *RD =
3701       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3702     if (!RD->hasDefinition() || !RD->getNumBases()) {
3703       // We don't need to emit any fields.
3704       break;
3705     }
3706 
3707     if (CanUseSingleInheritance(RD))
3708       BuildSIClassTypeInfo(RD);
3709     else
3710       BuildVMIClassTypeInfo(RD);
3711 
3712     break;
3713   }
3714 
3715   case Type::ObjCObject:
3716   case Type::ObjCInterface:
3717     BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3718     break;
3719 
3720   case Type::ObjCObjectPointer:
3721     BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3722     break;
3723 
3724   case Type::Pointer:
3725     BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3726     break;
3727 
3728   case Type::MemberPointer:
3729     BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3730     break;
3731 
3732   case Type::Atomic:
3733     // No fields, at least for the moment.
3734     break;
3735   }
3736 
3737   llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3738 
3739   SmallString<256> Name;
3740   llvm::raw_svector_ostream Out(Name);
3741   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3742   llvm::Module &M = CGM.getModule();
3743   llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3744   llvm::GlobalVariable *GV =
3745       new llvm::GlobalVariable(M, Init->getType(),
3746                                /*isConstant=*/true, Linkage, Init, Name);
3747 
3748   // If there's already an old global variable, replace it with the new one.
3749   if (OldGV) {
3750     GV->takeName(OldGV);
3751     llvm::Constant *NewPtr =
3752       llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3753     OldGV->replaceAllUsesWith(NewPtr);
3754     OldGV->eraseFromParent();
3755   }
3756 
3757   if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3758     GV->setComdat(M.getOrInsertComdat(GV->getName()));
3759 
3760   CharUnits Align =
3761       CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3762   GV->setAlignment(Align.getAsAlign());
3763 
3764   // The Itanium ABI specifies that type_info objects must be globally
3765   // unique, with one exception: if the type is an incomplete class
3766   // type or a (possibly indirect) pointer to one.  That exception
3767   // affects the general case of comparing type_info objects produced
3768   // by the typeid operator, which is why the comparison operators on
3769   // std::type_info generally use the type_info name pointers instead
3770   // of the object addresses.  However, the language's built-in uses
3771   // of RTTI generally require class types to be complete, even when
3772   // manipulating pointers to those class types.  This allows the
3773   // implementation of dynamic_cast to rely on address equality tests,
3774   // which is much faster.
3775 
3776   // All of this is to say that it's important that both the type_info
3777   // object and the type_info name be uniqued when weakly emitted.
3778 
3779   TypeName->setVisibility(Visibility);
3780   CGM.setDSOLocal(TypeName);
3781 
3782   GV->setVisibility(Visibility);
3783   CGM.setDSOLocal(GV);
3784 
3785   TypeName->setDLLStorageClass(DLLStorageClass);
3786   GV->setDLLStorageClass(DLLStorageClass);
3787 
3788   TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3789   GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3790 
3791   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3792 }
3793 
3794 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3795 /// for the given Objective-C object type.
3796 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3797   // Drop qualifiers.
3798   const Type *T = OT->getBaseType().getTypePtr();
3799   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3800 
3801   // The builtin types are abi::__class_type_infos and don't require
3802   // extra fields.
3803   if (isa<BuiltinType>(T)) return;
3804 
3805   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3806   ObjCInterfaceDecl *Super = Class->getSuperClass();
3807 
3808   // Root classes are also __class_type_info.
3809   if (!Super) return;
3810 
3811   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3812 
3813   // Everything else is single inheritance.
3814   llvm::Constant *BaseTypeInfo =
3815       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3816   Fields.push_back(BaseTypeInfo);
3817 }
3818 
3819 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3820 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3821 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3822   // Itanium C++ ABI 2.9.5p6b:
3823   // It adds to abi::__class_type_info a single member pointing to the
3824   // type_info structure for the base type,
3825   llvm::Constant *BaseTypeInfo =
3826     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3827   Fields.push_back(BaseTypeInfo);
3828 }
3829 
3830 namespace {
3831   /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3832   /// a class hierarchy.
3833   struct SeenBases {
3834     llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3835     llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3836   };
3837 }
3838 
3839 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3840 /// abi::__vmi_class_type_info.
3841 ///
3842 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3843                                              SeenBases &Bases) {
3844 
3845   unsigned Flags = 0;
3846 
3847   auto *BaseDecl =
3848       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3849 
3850   if (Base->isVirtual()) {
3851     // Mark the virtual base as seen.
3852     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3853       // If this virtual base has been seen before, then the class is diamond
3854       // shaped.
3855       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3856     } else {
3857       if (Bases.NonVirtualBases.count(BaseDecl))
3858         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3859     }
3860   } else {
3861     // Mark the non-virtual base as seen.
3862     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3863       // If this non-virtual base has been seen before, then the class has non-
3864       // diamond shaped repeated inheritance.
3865       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3866     } else {
3867       if (Bases.VirtualBases.count(BaseDecl))
3868         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3869     }
3870   }
3871 
3872   // Walk all bases.
3873   for (const auto &I : BaseDecl->bases())
3874     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3875 
3876   return Flags;
3877 }
3878 
3879 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3880   unsigned Flags = 0;
3881   SeenBases Bases;
3882 
3883   // Walk all bases.
3884   for (const auto &I : RD->bases())
3885     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3886 
3887   return Flags;
3888 }
3889 
3890 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3891 /// classes with bases that do not satisfy the abi::__si_class_type_info
3892 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3893 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3894   llvm::Type *UnsignedIntLTy =
3895     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3896 
3897   // Itanium C++ ABI 2.9.5p6c:
3898   //   __flags is a word with flags describing details about the class
3899   //   structure, which may be referenced by using the __flags_masks
3900   //   enumeration. These flags refer to both direct and indirect bases.
3901   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3902   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3903 
3904   // Itanium C++ ABI 2.9.5p6c:
3905   //   __base_count is a word with the number of direct proper base class
3906   //   descriptions that follow.
3907   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3908 
3909   if (!RD->getNumBases())
3910     return;
3911 
3912   // Now add the base class descriptions.
3913 
3914   // Itanium C++ ABI 2.9.5p6c:
3915   //   __base_info[] is an array of base class descriptions -- one for every
3916   //   direct proper base. Each description is of the type:
3917   //
3918   //   struct abi::__base_class_type_info {
3919   //   public:
3920   //     const __class_type_info *__base_type;
3921   //     long __offset_flags;
3922   //
3923   //     enum __offset_flags_masks {
3924   //       __virtual_mask = 0x1,
3925   //       __public_mask = 0x2,
3926   //       __offset_shift = 8
3927   //     };
3928   //   };
3929 
3930   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3931   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3932   // LLP64 platforms.
3933   // FIXME: Consider updating libc++abi to match, and extend this logic to all
3934   // LLP64 platforms.
3935   QualType OffsetFlagsTy = CGM.getContext().LongTy;
3936   const TargetInfo &TI = CGM.getContext().getTargetInfo();
3937   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3938     OffsetFlagsTy = CGM.getContext().LongLongTy;
3939   llvm::Type *OffsetFlagsLTy =
3940       CGM.getTypes().ConvertType(OffsetFlagsTy);
3941 
3942   for (const auto &Base : RD->bases()) {
3943     // The __base_type member points to the RTTI for the base type.
3944     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3945 
3946     auto *BaseDecl =
3947         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
3948 
3949     int64_t OffsetFlags = 0;
3950 
3951     // All but the lower 8 bits of __offset_flags are a signed offset.
3952     // For a non-virtual base, this is the offset in the object of the base
3953     // subobject. For a virtual base, this is the offset in the virtual table of
3954     // the virtual base offset for the virtual base referenced (negative).
3955     CharUnits Offset;
3956     if (Base.isVirtual())
3957       Offset =
3958         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3959     else {
3960       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3961       Offset = Layout.getBaseClassOffset(BaseDecl);
3962     }
3963 
3964     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3965 
3966     // The low-order byte of __offset_flags contains flags, as given by the
3967     // masks from the enumeration __offset_flags_masks.
3968     if (Base.isVirtual())
3969       OffsetFlags |= BCTI_Virtual;
3970     if (Base.getAccessSpecifier() == AS_public)
3971       OffsetFlags |= BCTI_Public;
3972 
3973     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3974   }
3975 }
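// For reference, decoding an __offset_flags word built above uses the masks
// from the ABI comment (a sketch with assumed variable names):
//
//   long Flags     = Base.__offset_flags;
//   bool IsVirtual = Flags & 0x1;   // __virtual_mask
//   bool IsPublic  = Flags & 0x2;   // __public_mask
//   long Offset    = Flags >> 8;    // __offset_shift: vbase-offset offset in
//                                   // the vtable if virtual, otherwise the
//                                   // direct base's offset in the object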
3976 
3977 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3978 /// pieces from \p Type.
3979 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3980   unsigned Flags = 0;
3981 
3982   if (Type.isConstQualified())
3983     Flags |= ItaniumRTTIBuilder::PTI_Const;
3984   if (Type.isVolatileQualified())
3985     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3986   if (Type.isRestrictQualified())
3987     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3988   Type = Type.getUnqualifiedType();
3989 
3990   // Itanium C++ ABI 2.9.5p7:
3991   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
3992   //   incomplete class type, the incomplete target type flag is set.
3993   if (ContainsIncompleteClassType(Type))
3994     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3995 
3996   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3997     if (Proto->isNothrow()) {
3998       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3999       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4000     }
4001   }
4002 
4003   return Flags;
4004 }
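// For example, calling the helper above with a pointee type of
// 'const volatile int' yields PTI_Const | PTI_Volatile and strips the
// qualifiers, and a pointee that is (or points to) an incomplete class type
// additionally sets PTI_Incomplete:
//
//   QualType Ty = /* 'const volatile int' */;
//   unsigned Flags = extractPBaseFlags(Ctx, Ty);
//   // Flags == PTI_Const | PTI_Volatile, and Ty is now the unqualified 'int'.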
4005 
4006 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4007 /// used for pointer types.
4008 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4009   // Itanium C++ ABI 2.9.5p7:
4010   //   __flags is a flag word describing the cv-qualification and other
4011   //   attributes of the type pointed to
4012   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4013 
4014   llvm::Type *UnsignedIntLTy =
4015     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4016   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4017 
4018   // Itanium C++ ABI 2.9.5p7:
4019   //  __pointee is a pointer to the std::type_info derivation for the
4020   //  unqualified type being pointed to.
4021   llvm::Constant *PointeeTypeInfo =
4022       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4023   Fields.push_back(PointeeTypeInfo);
4024 }
4025 
4026 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4027 /// struct, used for member pointer types.
4028 void
4029 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4030   QualType PointeeTy = Ty->getPointeeType();
4031 
4032   // Itanium C++ ABI 2.9.5p7:
4033   //   __flags is a flag word describing the cv-qualification and other
4034   //   attributes of the type pointed to.
4035   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4036 
4037   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4038   if (IsIncompleteClassType(ClassType))
4039     Flags |= PTI_ContainingClassIncomplete;
4040 
4041   llvm::Type *UnsignedIntLTy =
4042     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4043   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4044 
4045   // Itanium C++ ABI 2.9.5p7:
4046   //   __pointee is a pointer to the std::type_info derivation for the
4047   //   unqualified type being pointed to.
4048   llvm::Constant *PointeeTypeInfo =
4049       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4050   Fields.push_back(PointeeTypeInfo);
4051 
4052   // Itanium C++ ABI 2.9.5p9:
4053   //   __context is a pointer to an abi::__class_type_info corresponding to the
4054   //   class type containing the member pointed to
4055   //   (e.g., the "A" in "int A::*").
4056   Fields.push_back(
4057       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4058 }
4059 
4060 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4061   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4062 }
4063 
4064 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4065   // Types added here must also be added to TypeInfoIsInStandardLibrary.
4066   QualType FundamentalTypes[] = {
4067       getContext().VoidTy,             getContext().NullPtrTy,
4068       getContext().BoolTy,             getContext().WCharTy,
4069       getContext().CharTy,             getContext().UnsignedCharTy,
4070       getContext().SignedCharTy,       getContext().ShortTy,
4071       getContext().UnsignedShortTy,    getContext().IntTy,
4072       getContext().UnsignedIntTy,      getContext().LongTy,
4073       getContext().UnsignedLongTy,     getContext().LongLongTy,
4074       getContext().UnsignedLongLongTy, getContext().Int128Ty,
4075       getContext().UnsignedInt128Ty,   getContext().HalfTy,
4076       getContext().FloatTy,            getContext().DoubleTy,
4077       getContext().LongDoubleTy,       getContext().Float128Ty,
4078       getContext().Char8Ty,            getContext().Char16Ty,
4079       getContext().Char32Ty
4080   };
4081   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4082       RD->hasAttr<DLLExportAttr>()
4083       ? llvm::GlobalValue::DLLExportStorageClass
4084       : llvm::GlobalValue::DefaultStorageClass;
4085   llvm::GlobalValue::VisibilityTypes Visibility =
4086       CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4087   for (const QualType &FundamentalType : FundamentalTypes) {
4088     QualType PointerType = getContext().getPointerType(FundamentalType);
4089     QualType PointerTypeConst = getContext().getPointerType(
4090         FundamentalType.withConst());
4091     for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4092       ItaniumRTTIBuilder(*this).BuildTypeInfo(
4093           Type, llvm::GlobalValue::ExternalLinkage,
4094           Visibility, DLLStorageClass);
4095   }
4096 }
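// For illustration: for each fundamental type T this emits type_info for T,
// T* and const T*.  Using the usual Itanium manglings, 'int' contributes the
// following symbols, all with external linkage so they can be exported from
// the C++ runtime library being built:
//
//   int        -> _ZTIi
//   int*       -> _ZTIPi
//   const int* -> _ZTIPKi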
4097 
4098 /// What sort of uniqueness rules should we use for the RTTI for the
4099 /// given type?
4100 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4101     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4102   if (shouldRTTIBeUnique())
4103     return RUK_Unique;
4104 
4105   // It's only necessary for linkonce_odr or weak_odr linkage.
4106   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4107       Linkage != llvm::GlobalValue::WeakODRLinkage)
4108     return RUK_Unique;
4109 
4110   // It's only necessary with default visibility.
4111   if (CanTy->getVisibility() != DefaultVisibility)
4112     return RUK_Unique;
4113 
4114   // If we're not required to publish this symbol, hide it.
4115   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4116     return RUK_NonUniqueHidden;
4117 
4118   // If we're required to publish this symbol, as we might be under an
4119   // explicit instantiation, leave it with default visibility but
4120   // enable string-comparisons.
4121   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4122   return RUK_NonUniqueVisible;
4123 }
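// Summarizing the classification above for ABIs where shouldRTTIBeUnique()
// returns false (for instance the ARM64 Apple ABI):
//
//   linkage                     visibility    result
//   --------------------------  ------------  --------------------
//   anything else               any           RUK_Unique
//   linkonce_odr / weak_odr     non-default   RUK_Unique
//   linkonce_odr                default       RUK_NonUniqueHidden
//   weak_odr                    default       RUK_NonUniqueVisible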
4124 
4125 // Find out how to codegen the complete destructor and constructor
4126 namespace {
4127 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4128 }
4129 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4130                                        const CXXMethodDecl *MD) {
4131   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4132     return StructorCodegen::Emit;
4133 
4134   // The complete and base structors are not equivalent if there are any virtual
4135   // bases, so emit separate functions.
4136   if (MD->getParent()->getNumVBases())
4137     return StructorCodegen::Emit;
4138 
4139   GlobalDecl AliasDecl;
4140   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4141     AliasDecl = GlobalDecl(DD, Dtor_Complete);
4142   } else {
4143     const auto *CD = cast<CXXConstructorDecl>(MD);
4144     AliasDecl = GlobalDecl(CD, Ctor_Complete);
4145   }
4146   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4147 
4148   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4149     return StructorCodegen::RAUW;
4150 
4151   // FIXME: Should we allow available_externally aliases?
4152   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4153     return StructorCodegen::RAUW;
4154 
4155   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4156     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4157     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4158         CGM.getTarget().getTriple().isOSBinFormatWasm())
4159       return StructorCodegen::COMDAT;
4160     return StructorCodegen::Emit;
4161   }
4162 
4163   return StructorCodegen::Alias;
4164 }
4165 
4166 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4167                                            GlobalDecl AliasDecl,
4168                                            GlobalDecl TargetDecl) {
4169   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4170 
4171   StringRef MangledName = CGM.getMangledName(AliasDecl);
4172   llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4173   if (Entry && !Entry->isDeclaration())
4174     return;
4175 
4176   auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4177 
4178   // Create the alias with no name.
4179   auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4180 
4181   // Constructors and destructors are always unnamed_addr.
4182   Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4183 
4184   // Switch any previous uses to the alias.
4185   if (Entry) {
4186     assert(Entry->getType() == Aliasee->getType() &&
4187            "declaration exists with different type");
4188     Alias->takeName(Entry);
4189     Entry->replaceAllUsesWith(Alias);
4190     Entry->eraseFromParent();
4191   } else {
4192     Alias->setName(MangledName);
4193   }
4194 
4195   // Finally, set up the alias with its proper name and attributes.
4196   CGM.SetCommonAttributes(AliasDecl, Alias);
4197 }
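// As a concrete (assumed) example of the result: with -mconstructor-aliases,
// the complete constructor of a class with no virtual bases can be emitted
// as a plain IR alias of the base constructor, roughly:
//
//   @_ZN1AC1Ev = unnamed_addr alias void (%struct.A*), void (%struct.A*)* @_ZN1AC2Ev
//
// so only one function body (the base variant, _ZN1AC2Ev) is actually emitted.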
4198 
4199 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4200   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4201   auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4202   const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4203 
4204   StructorCodegen CGType = getCodegenToUse(CGM, MD);
4205 
4206   if (CD ? GD.getCtorType() == Ctor_Complete
4207          : GD.getDtorType() == Dtor_Complete) {
4208     GlobalDecl BaseDecl;
4209     if (CD)
4210       BaseDecl = GD.getWithCtorType(Ctor_Base);
4211     else
4212       BaseDecl = GD.getWithDtorType(Dtor_Base);
4213 
4214     if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4215       emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4216       return;
4217     }
4218 
4219     if (CGType == StructorCodegen::RAUW) {
4220       StringRef MangledName = CGM.getMangledName(GD);
4221       auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4222       CGM.addReplacement(MangledName, Aliasee);
4223       return;
4224     }
4225   }
4226 
4227   // The base destructor is equivalent to the base destructor of its
4228   // base class if there is exactly one non-virtual base class with a
4229   // non-trivial destructor, there are no fields with a non-trivial
4230   // destructor, and the body of the destructor is trivial.
4231   if (DD && GD.getDtorType() == Dtor_Base &&
4232       CGType != StructorCodegen::COMDAT &&
4233       !CGM.TryEmitBaseDestructorAsAlias(DD))
4234     return;
4235 
4236   // FIXME: The deleting destructor is equivalent to the selected operator
4237   // delete if:
4238   //  * either the delete is a destroying operator delete or the destructor
4239   //    would be trivial if it weren't virtual,
4240   //  * the conversion from the 'this' parameter to the first parameter of the
4241   //    destructor is equivalent to a bitcast,
4242   //  * the destructor does not have an implicit "this" return, and
4243   //  * the operator delete has the same calling convention and IR function type
4244   //    as the destructor.
4245   // In such cases we should try to emit the deleting dtor as an alias to the
4246   // selected 'operator delete'.
4247 
4248   llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4249 
4250   if (CGType == StructorCodegen::COMDAT) {
4251     SmallString<256> Buffer;
4252     llvm::raw_svector_ostream Out(Buffer);
4253     if (DD)
4254       getMangleContext().mangleCXXDtorComdat(DD, Out);
4255     else
4256       getMangleContext().mangleCXXCtorComdat(CD, Out);
4257     llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4258     Fn->setComdat(C);
4259   } else {
4260     CGM.maybeSetTrivialComdat(*MD, *Fn);
4261   }
4262 }
4263 
4264 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4265   // void *__cxa_begin_catch(void*);
4266   llvm::FunctionType *FTy = llvm::FunctionType::get(
4267       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4268 
4269   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4270 }
4271 
4272 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4273   // void __cxa_end_catch();
4274   llvm::FunctionType *FTy =
4275       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4276 
4277   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4278 }
4279 
4280 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4281   // void *__cxa_get_exception_ptr(void*);
4282   llvm::FunctionType *FTy = llvm::FunctionType::get(
4283       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4284 
4285   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4286 }
4287 
4288 namespace {
4289   /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4290   /// exception type lets us state definitively that the thrown exception
4291   /// type does not have a destructor.  In particular:
4292   ///   - Catch-alls tell us nothing, so we have to conservatively
4293   ///     assume that the thrown exception might have a destructor.
4294   ///   - Catches by reference behave according to their base types.
4295   ///   - Catches of non-record types will only trigger for exceptions
4296   ///     of non-record types, which never have destructors.
4297   ///   - Catches of record types can trigger for arbitrary subclasses
4298   ///     of the caught type, so we have to assume the actual thrown
4299   ///     exception type might have a throwing destructor, even if the
4300   ///     caught type's destructor is trivial or nothrow.
4301   struct CallEndCatch final : EHScopeStack::Cleanup {
4302     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4303     bool MightThrow;
4304 
4305     void Emit(CodeGenFunction &CGF, Flags flags) override {
4306       if (!MightThrow) {
4307         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4308         return;
4309       }
4310 
4311       CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4312     }
4313   };
4314 }
4315 
4316 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4317 /// __cxa_end_catch.
4318 ///
4319 /// \param EndMightThrow - true if __cxa_end_catch might throw
4320 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4321                                    llvm::Value *Exn,
4322                                    bool EndMightThrow) {
4323   llvm::CallInst *call =
4324     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4325 
4326   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4327 
4328   return call;
4329 }
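// Putting the two runtime hooks together, the code emitted for a handler
// conceptually performs the following (C-like pseudo code, not the literal
// output of this file):
//
//   void *adjusted = __cxa_begin_catch(exn);  // claim the exception object
//   /* ...initialize the catch parameter from 'adjusted', run the body... */
//   __cxa_end_catch();  // pushed as a NormalAndEHCleanup above, so it runs
//                       // on both normal and exceptional exits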
4330 
4331 /// A "special initializer" callback for initializing a catch
4332 /// parameter during catch initialization.
4333 static void InitCatchParam(CodeGenFunction &CGF,
4334                            const VarDecl &CatchParam,
4335                            Address ParamAddr,
4336                            SourceLocation Loc) {
4337   // Load the exception from where the landing pad saved it.
4338   llvm::Value *Exn = CGF.getExceptionFromSlot();
4339 
4340   CanQualType CatchType =
4341     CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4342   llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4343 
4344   // If we're catching by reference, we can just cast the object
4345   // pointer to the appropriate pointer.
4346   if (isa<ReferenceType>(CatchType)) {
4347     QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4348     bool EndCatchMightThrow = CaughtType->isRecordType();
4349 
4350     // __cxa_begin_catch returns the adjusted object pointer.
4351     llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4352 
4353     // We have no way to tell the personality function that we're
4354     // catching by reference, so if we're catching a pointer,
4355     // __cxa_begin_catch will actually return that pointer by value.
4356     if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4357       QualType PointeeType = PT->getPointeeType();
4358 
4359       // When catching by reference, generally we should just ignore
4360       // this by-value pointer and use the exception object instead.
4361       if (!PointeeType->isRecordType()) {
4362 
4363         // Exn points to the struct _Unwind_Exception header, which
4364         // we have to skip past in order to reach the exception data.
4365         unsigned HeaderSize =
4366           CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4367         AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
4368 
4369       // However, if we're catching a pointer-to-record type, that won't
4370       // work, because the personality function might have adjusted
4371       // the pointer.  There's actually no way for us to fully satisfy
4372       // the language/ABI contract here:  we can't use Exn because it
4373       // might have the wrong adjustment, but we can't use the by-value
4374       // pointer because it's off by a level of abstraction.
4375       //
4376       // The current solution is to dump the adjusted pointer into an
4377       // alloca, which breaks language semantics (because changing the
4378       // pointer doesn't change the exception) but at least works.
4379       // The better solution would be to filter out non-exact matches
4380       // and rethrow them, but this is tricky because the rethrow
4381       // really needs to be catchable by other sites at this landing
4382       // pad.  The best solution is to fix the personality function.
4383       } else {
4384         // Pull the pointer for the reference type off.
4385         llvm::Type *PtrTy =
4386           cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
4387 
4388         // Create the temporary and write the adjusted pointer into it.
4389         Address ExnPtrTmp =
4390           CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4391         llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4392         CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4393 
4394         // Bind the reference to the temporary.
4395         AdjustedExn = ExnPtrTmp.getPointer();
4396       }
4397     }
4398 
4399     llvm::Value *ExnCast =
4400       CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4401     CGF.Builder.CreateStore(ExnCast, ParamAddr);
4402     return;
4403   }
4404 
4405   // Scalars and complexes.
4406   TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4407   if (TEK != TEK_Aggregate) {
4408     llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4409 
4410     // If the catch type is a pointer type, __cxa_begin_catch returns
4411     // the pointer by value.
4412     if (CatchType->hasPointerRepresentation()) {
4413       llvm::Value *CastExn =
4414         CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4415 
4416       switch (CatchType.getQualifiers().getObjCLifetime()) {
4417       case Qualifiers::OCL_Strong:
4418         CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4419         LLVM_FALLTHROUGH;
4420 
4421       case Qualifiers::OCL_None:
4422       case Qualifiers::OCL_ExplicitNone:
4423       case Qualifiers::OCL_Autoreleasing:
4424         CGF.Builder.CreateStore(CastExn, ParamAddr);
4425         return;
4426 
4427       case Qualifiers::OCL_Weak:
4428         CGF.EmitARCInitWeak(ParamAddr, CastExn);
4429         return;
4430       }
4431       llvm_unreachable("bad ownership qualifier!");
4432     }
4433 
4434     // Otherwise, it returns a pointer into the exception object.
4435 
4436     unsigned DefaultAS = CGF.CGM.getTargetCodeGenInfo().getDefaultAS();
4437     llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(DefaultAS);
4438     llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4439 
4440     LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4441     LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4442     switch (TEK) {
4443     case TEK_Complex:
4444       CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4445                              /*init*/ true);
4446       return;
4447     case TEK_Scalar: {
4448       llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4449       CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4450       return;
4451     }
4452     case TEK_Aggregate:
4453       llvm_unreachable("evaluation kind filtered out!");
4454     }
4455     llvm_unreachable("bad evaluation kind");
4456   }
4457 
4458   assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4459   auto catchRD = CatchType->getAsCXXRecordDecl();
4460   CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4461 
4462   unsigned DefaultAS = CGF.CGM.getTargetCodeGenInfo().getDefaultAS();
4463   llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(DefaultAS);
4464 
4465   // Check for a copy expression.  If we don't have a copy expression,
4466   // that means a trivial copy is okay.
4467   const Expr *copyExpr = CatchParam.getInit();
4468   if (!copyExpr) {
4469     llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4470     Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4471                         caughtExnAlignment);
4472     LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4473     LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4474     CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4475     return;
4476   }
4477 
4478   // We have to call __cxa_get_exception_ptr to get the adjusted
4479   // pointer before copying.
4480   llvm::CallInst *rawAdjustedExn =
4481     CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4482 
4483   // Cast that to the appropriate type.
4484   Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4485                       caughtExnAlignment);
4486 
4487   // The copy expression is defined in terms of an OpaqueValueExpr.
4488   // Find it and map it to the adjusted expression.
4489   CodeGenFunction::OpaqueValueMapping
4490     opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4491            CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4492 
4493   // Call the copy ctor in a terminate scope.
4494   CGF.EHStack.pushTerminate();
4495 
4496   // Perform the copy construction.
4497   CGF.EmitAggExpr(copyExpr,
4498                   AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4499                                         AggValueSlot::IsNotDestructed,
4500                                         AggValueSlot::DoesNotNeedGCBarriers,
4501                                         AggValueSlot::IsNotAliased,
4502                                         AggValueSlot::DoesNotOverlap));
4503 
4504   // Leave the terminate scope.
4505   CGF.EHStack.popTerminate();
4506 
4507   // Undo the opaque value mapping.
4508   opaque.pop();
4509 
4510   // Finally we can call __cxa_begin_catch.
4511   CallBeginCatch(CGF, Exn, true);
4512 }
4513 
4514 /// Begins a catch statement by initializing the catch variable and
4515 /// calling __cxa_begin_catch.
4516 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4517                                    const CXXCatchStmt *S) {
4518   // We have to be very careful with the ordering of cleanups here:
4519   //   C++ [except.throw]p4:
4520   //     The destruction [of the exception temporary] occurs
4521   //     immediately after the destruction of the object declared in
4522   //     the exception-declaration in the handler.
4523   //
4524   // So the precise ordering is:
4525   //   1.  Construct catch variable.
4526   //   2.  __cxa_begin_catch
4527   //   3.  Enter __cxa_end_catch cleanup
4528   //   4.  Enter dtor cleanup
4529   //
4530   // We do this by using a slightly abnormal initialization process.
4531   // Delegation sequence:
4532   //   - ExitCXXTryStmt opens a RunCleanupsScope
4533   //     - EmitAutoVarAlloca creates the variable and debug info
4534   //       - InitCatchParam initializes the variable from the exception
4535   //       - CallBeginCatch calls __cxa_begin_catch
4536   //       - CallBeginCatch enters the __cxa_end_catch cleanup
4537   //     - EmitAutoVarCleanups enters the variable destructor cleanup
4538   //   - EmitCXXTryStmt emits the code for the catch body
4539   //   - EmitCXXTryStmt closes the RunCleanupsScope
4540 
4541   VarDecl *CatchParam = S->getExceptionDecl();
4542   if (!CatchParam) {
4543     llvm::Value *Exn = CGF.getExceptionFromSlot();
4544     CallBeginCatch(CGF, Exn, true);
4545     return;
4546   }
4547 
4548   // Emit the local.
4549   CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4550   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4551   CGF.EmitAutoVarCleanups(var);
4552 }
4553 
4554 /// Get or define the following function:
4555 ///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4556 /// This code is used only in C++.
4557 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4558   llvm::FunctionType *fnTy =
4559     llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4560   llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4561       fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4562   llvm::Function *fn =
4563       cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4564   if (fn->empty()) {
4565     fn->setDoesNotThrow();
4566     fn->setDoesNotReturn();
4567 
4568     // What we really want is to massively penalize inlining without
4569     // forbidding it completely.  The difference between that and
4570     // 'noinline' is negligible.
4571     fn->addFnAttr(llvm::Attribute::NoInline);
4572 
4573     // Allow this function to be shared across translation units, but
4574     // we don't want it to turn into an exported symbol.
4575     fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4576     fn->setVisibility(llvm::Function::HiddenVisibility);
4577     if (CGM.supportsCOMDAT())
4578       fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4579 
4580     // Set up the function.
4581     llvm::BasicBlock *entry =
4582         llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4583     CGBuilderTy builder(CGM, entry);
4584 
4585     // Pull the exception pointer out of the parameter list.
4586     llvm::Value *exn = &*fn->arg_begin();
4587 
4588     // Call __cxa_begin_catch(exn).
4589     llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4590     catchCall->setDoesNotThrow();
4591     catchCall->setCallingConv(CGM.getRuntimeCC());
4592 
4593     // Call std::terminate().
4594     llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4595     termCall->setDoesNotThrow();
4596     termCall->setDoesNotReturn();
4597     termCall->setCallingConv(CGM.getRuntimeCC());
4598 
4599     // std::terminate cannot return.
4600     builder.CreateUnreachable();
4601   }
4602   return fnRef;
4603 }
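// The helper defined above corresponds roughly to the following IR (shown as
// a sketch; the exact attributes and calling convention depend on the
// target):
//
//   define linkonce_odr hidden void @__clang_call_terminate(i8* %exn)
//       noinline nounwind noreturn {
//     %adj = call i8* @__cxa_begin_catch(i8* %exn) nounwind
//     call void @_ZSt9terminatev() noreturn nounwind
//     unreachable
//   }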
4604 
4605 llvm::CallInst *
4606 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4607                                                    llvm::Value *Exn) {
4608   // In C++, we want to call __cxa_begin_catch() before terminating.
4609   if (Exn) {
4610     assert(CGF.CGM.getLangOpts().CPlusPlus);
4611     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4612   }
4613   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4614 }
4615 
4616 std::pair<llvm::Value *, const CXXRecordDecl *>
4617 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4618                              const CXXRecordDecl *RD) {
4619   return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4620 }
4621 
4622 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4623                                        const CXXCatchStmt *C) {
4624   if (CGF.getTarget().hasFeature("exception-handling"))
4625     CGF.EHStack.pushCleanup<CatchRetScope>(
4626         NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4627   ItaniumCXXABI::emitBeginCatch(CGF, C);
4628 }
4629 
4630 /// Register a global destructor as best as we know how.
4631 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4632                                   llvm::FunctionCallee dtor,
4633                                   llvm::Constant *addr) {
4634   if (D.getTLSKind() != VarDecl::TLS_None)
4635     llvm::report_fatal_error("thread local storage not yet implemented on AIX");
4636 
4637   // Create __dtor function for the var decl.
4638   llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr);
4639 
4640   // Register above __dtor with atexit().
4641   CGF.registerGlobalDtorWithAtExit(dtorStub);
4642 
4643   // Emit __finalize function to unregister __dtor and (as appropriate) call
4644   // __dtor.
4645   emitCXXStermFinalizer(D, dtorStub, addr);
4646 }
4647 
4648 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4649                                      llvm::Constant *addr) {
4650   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4651   SmallString<256> FnName;
4652   {
4653     llvm::raw_svector_ostream Out(FnName);
4654     getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4655   }
4656 
4657   // Create the finalization action associated with a variable.
4658   const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4659   llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4660       FTy, FnName.str(), FI, D.getLocation());
4661 
4662   CodeGenFunction CGF(CGM);
4663 
4664   CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4665                     FunctionArgList());
4666 
4667   // The unatexit subroutine unregisters __dtor functions that were previously
4668   // registered by the atexit subroutine. If the referenced function is found,
4669   // unatexit returns a value of 0, meaning that the cleanup is still
4670   // pending (and we should call the __dtor function).
4671   llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4672 
4673   llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4674 
4675   llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4676   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4677 
4678   // Check if unatexit returns a value of 0. If it does, jump to
4679   // DestructCallBlock, otherwise jump to EndBlock directly.
4680   CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4681 
4682   CGF.EmitBlock(DestructCallBlock);
4683 
4684   // Emit the call to dtorStub.
4685   llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4686 
4687   // Make sure the call and the callee agree on calling convention.
4688   CI->setCallingConv(dtorStub->getCallingConv());
4689 
4690   CGF.EmitBlock(EndBlock);
4691 
4692   CGF.FinishFunction();
4693 
4694   CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4695 }
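// The finalizer generated above is therefore logically equivalent to the
// following (a sketch using assumed names __finalize_V / __dtor_V and the
// AIX atexit/unatexit interface):
//
//   void __finalize_V(void) {
//     // unatexit() returns 0 if __dtor_V was still registered, i.e. the
//     // cleanup is still pending and must be run here.
//     if (unatexit(__dtor_V) == 0)
//       __dtor_V();
//   }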
4696