1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
11 // in this file generates structures that follow the Itanium C++ ABI, which is
12 // documented at:
13 //  http://www.codesourcery.com/public/cxx-abi/abi.html
14 //  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
15 //
16 // It also supports the closely-related ARM ABI, documented at:
17 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
18 //
19 //===----------------------------------------------------------------------===//
20 
21 #include "CGCXXABI.h"
22 #include "CGCleanup.h"
23 #include "CGRecordLayout.h"
24 #include "CGVTables.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenModule.h"
27 #include "TargetInfo.h"
28 #include "clang/CodeGen/ConstantInitBuilder.h"
29 #include "clang/AST/Mangle.h"
30 #include "clang/AST/Type.h"
31 #include "clang/AST/StmtCXX.h"
32 #include "llvm/IR/CallSite.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/GlobalValue.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/Value.h"
38 #include "llvm/Support/ScopedPrinter.h"
39 
40 using namespace clang;
41 using namespace CodeGen;
42 
43 namespace {
44 class ItaniumCXXABI : public CodeGen::CGCXXABI {
45   /// VTables - All the vtables which have been defined.
46   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
47 
48 protected:
49   bool UseARMMethodPtrABI;
50   bool UseARMGuardVarABI;
51   bool Use32BitVTableOffsetABI;
52 
53   ItaniumMangleContext &getMangleContext() {
54     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
55   }
56 
57 public:
58   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
59                 bool UseARMMethodPtrABI = false,
60                 bool UseARMGuardVarABI = false) :
61     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
62     UseARMGuardVarABI(UseARMGuardVarABI),
63     Use32BitVTableOffsetABI(false) { }
64 
65   bool classifyReturnType(CGFunctionInfo &FI) const override;
66 
67   bool passClassIndirect(const CXXRecordDecl *RD) const {
68     return !canCopyArgument(RD);
69   }
70 
71   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
72     // If C++ prohibits us from making a copy, pass by address.
73     if (passClassIndirect(RD))
74       return RAA_Indirect;
75     return RAA_Default;
76   }
77 
78   bool isThisCompleteObject(GlobalDecl GD) const override {
79     // The Itanium ABI has separate complete-object vs.  base-object
80     // variants of both constructors and destructors.
81     if (isa<CXXDestructorDecl>(GD.getDecl())) {
82       switch (GD.getDtorType()) {
83       case Dtor_Complete:
84       case Dtor_Deleting:
85         return true;
86 
87       case Dtor_Base:
88         return false;
89 
90       case Dtor_Comdat:
91         llvm_unreachable("emitting dtor comdat as function?");
92       }
93       llvm_unreachable("bad dtor kind");
94     }
95     if (isa<CXXConstructorDecl>(GD.getDecl())) {
96       switch (GD.getCtorType()) {
97       case Ctor_Complete:
98         return true;
99 
100       case Ctor_Base:
101         return false;
102 
103       case Ctor_CopyingClosure:
104       case Ctor_DefaultClosure:
105         llvm_unreachable("closure ctors in Itanium ABI?");
106 
107       case Ctor_Comdat:
108         llvm_unreachable("emitting ctor comdat as function?");
109       }
110       llvm_unreachable("bad ctor kind");
111     }
112 
113     // No other kinds.
114     return false;
115   }
116 
117   bool isZeroInitializable(const MemberPointerType *MPT) override;
118 
119   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
120 
121   CGCallee
122     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
123                                     const Expr *E,
124                                     Address This,
125                                     llvm::Value *&ThisPtrForCall,
126                                     llvm::Value *MemFnPtr,
127                                     const MemberPointerType *MPT) override;
128 
129   llvm::Value *
130     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
131                                  Address Base,
132                                  llvm::Value *MemPtr,
133                                  const MemberPointerType *MPT) override;
134 
135   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
136                                            const CastExpr *E,
137                                            llvm::Value *Src) override;
138   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
139                                               llvm::Constant *Src) override;
140 
141   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
142 
143   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
144   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
145                                         CharUnits offset) override;
146   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
147   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
148                                      CharUnits ThisAdjustment);
149 
150   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
151                                            llvm::Value *L, llvm::Value *R,
152                                            const MemberPointerType *MPT,
153                                            bool Inequality) override;
154 
155   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
156                                          llvm::Value *Addr,
157                                          const MemberPointerType *MPT) override;
158 
159   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
160                                Address Ptr, QualType ElementType,
161                                const CXXDestructorDecl *Dtor) override;
162 
163   /// Itanium says that an _Unwind_Exception has to be "double-word"
164   /// aligned (and thus the end of it is also so-aligned), meaning 16
165   /// bytes.  Of course, that was written for the actual Itanium,
166   /// which is a 64-bit platform.  Classically, the ABI doesn't really
167   /// specify the alignment on other platforms, but in practice
168   /// libUnwind declares the struct with __attribute__((aligned)), so
169   /// we assume that alignment here.  (It's generally 16 bytes, but
170   /// some targets override it.)
171   CharUnits getAlignmentOfExnObject() {
172     auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
173     return CGM.getContext().toCharUnitsFromBits(align);
174   }
175 
176   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
177   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
178 
179   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
180 
181   llvm::CallInst *
182   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
183                                       llvm::Value *Exn) override;
184 
185   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
186   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
187   CatchTypeInfo
188   getAddrOfCXXCatchHandlerType(QualType Ty,
189                                QualType CatchHandlerType) override {
190     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
191   }
192 
193   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
194   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
195   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
196                           Address ThisPtr,
197                           llvm::Type *StdTypeInfoPtrTy) override;
198 
199   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
200                                           QualType SrcRecordTy) override;
201 
202   llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
203                                    QualType SrcRecordTy, QualType DestTy,
204                                    QualType DestRecordTy,
205                                    llvm::BasicBlock *CastEnd) override;
206 
207   llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
208                                      QualType SrcRecordTy,
209                                      QualType DestTy) override;
210 
211   bool EmitBadCastCall(CodeGenFunction &CGF) override;
212 
213   llvm::Value *
214     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
215                               const CXXRecordDecl *ClassDecl,
216                               const CXXRecordDecl *BaseClassDecl) override;
217 
218   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
219 
220   AddedStructorArgs
221   buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
222                          SmallVectorImpl<CanQualType> &ArgTys) override;
223 
224   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
225                               CXXDtorType DT) const override {
226     // Itanium does not emit any destructor variant as an inline thunk.
227     // Delegating may occur as an optimization, but all variants are either
228     // emitted with external linkage or as linkonce if they are inline and used.
229     return false;
230   }
231 
232   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
233 
234   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
235                                  FunctionArgList &Params) override;
236 
237   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
238 
239   AddedStructorArgs
240   addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
241                              CXXCtorType Type, bool ForVirtualBase,
242                              bool Delegating, CallArgList &Args) override;
243 
244   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
245                           CXXDtorType Type, bool ForVirtualBase,
246                           bool Delegating, Address This) override;
247 
248   void emitVTableDefinitions(CodeGenVTables &CGVT,
249                              const CXXRecordDecl *RD) override;
250 
251   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
252                                            CodeGenFunction::VPtr Vptr) override;
253 
254   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
255     return true;
256   }
257 
258   llvm::Constant *
259   getVTableAddressPoint(BaseSubobject Base,
260                         const CXXRecordDecl *VTableClass) override;
261 
262   llvm::Value *getVTableAddressPointInStructor(
263       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
264       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
265 
266   llvm::Value *getVTableAddressPointInStructorWithVTT(
267       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
268       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
269 
270   llvm::Constant *
271   getVTableAddressPointForConstExpr(BaseSubobject Base,
272                                     const CXXRecordDecl *VTableClass) override;
273 
274   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
275                                         CharUnits VPtrOffset) override;
276 
277   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
278                                      Address This, llvm::Type *Ty,
279                                      SourceLocation Loc) override;
280 
281   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
282                                          const CXXDestructorDecl *Dtor,
283                                          CXXDtorType DtorType,
284                                          Address This,
285                                          const CXXMemberCallExpr *CE) override;
286 
287   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
288 
289   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
290   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
291 
292   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
293                        bool ReturnAdjustment) override {
294     // Allow inlining of thunks by emitting them with available_externally
295     // linkage together with vtables when needed.
296     if (ForVTable && !Thunk->hasLocalLinkage())
297       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
298     CGM.setGVProperties(Thunk, GD);
299   }
300 
301   bool exportThunk() override { return true; }
302 
303   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
304                                      const ThisAdjustment &TA) override;
305 
306   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
307                                        const ReturnAdjustment &RA) override;
308 
309   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
310                               FunctionArgList &Args) const override {
311     assert(!Args.empty() && "expected the arglist to not be empty!");
312     return Args.size() - 1;
313   }
314 
315   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
316   StringRef GetDeletedVirtualCallName() override
317     { return "__cxa_deleted_virtual"; }
318 
319   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
320   Address InitializeArrayCookie(CodeGenFunction &CGF,
321                                 Address NewPtr,
322                                 llvm::Value *NumElements,
323                                 const CXXNewExpr *expr,
324                                 QualType ElementType) override;
325   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
326                                    Address allocPtr,
327                                    CharUnits cookieSize) override;
328 
329   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
330                        llvm::GlobalVariable *DeclPtr,
331                        bool PerformInit) override;
332   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
333                           llvm::Constant *dtor, llvm::Constant *addr) override;
334 
335   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
336                                                 llvm::Value *Val);
337   void EmitThreadLocalInitFuncs(
338       CodeGenModule &CGM,
339       ArrayRef<const VarDecl *> CXXThreadLocals,
340       ArrayRef<llvm::Function *> CXXThreadLocalInits,
341       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
342 
343   bool usesThreadWrapperFunction() const override { return true; }
344   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
345                                       QualType LValType) override;
346 
347   bool NeedsVTTParameter(GlobalDecl GD) override;
348 
349   /**************************** RTTI Uniqueness ******************************/
350 
351 protected:
352   /// Returns true if the ABI requires RTTI type_info objects to be unique
353   /// across a program.
354   virtual bool shouldRTTIBeUnique() const { return true; }
355 
356 public:
357   /// What sort of unique-RTTI behavior should we use?
358   enum RTTIUniquenessKind {
359     /// We are guaranteeing, or need to guarantee, that the RTTI string
360     /// is unique.
361     RUK_Unique,
362 
363     /// We are not guaranteeing uniqueness for the RTTI string, so we
364     /// can demote to hidden visibility but must use string comparisons.
365     RUK_NonUniqueHidden,
366 
367     /// We are not guaranteeing uniqueness for the RTTI string, so we
368     /// have to use string comparisons, but we also have to emit it with
369     /// non-hidden visibility.
370     RUK_NonUniqueVisible
371   };
372 
373   /// Return the required visibility status for the given type and linkage in
374   /// the current ABI.
375   RTTIUniquenessKind
376   classifyRTTIUniqueness(QualType CanTy,
377                          llvm::GlobalValue::LinkageTypes Linkage) const;
378   friend class ItaniumRTTIBuilder;
379 
380   void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
381 
382   std::pair<llvm::Value *, const CXXRecordDecl *>
383   LoadVTablePtr(CodeGenFunction &CGF, Address This,
384                 const CXXRecordDecl *RD) override;
385 
386  private:
387    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
388      const auto &VtableLayout =
389          CGM.getItaniumVTableContext().getVTableLayout(RD);
390 
391      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
392        // Skip empty slot.
393        if (!VtableComponent.isUsedFunctionPointerKind())
394          continue;
395 
396        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
397        if (!Method->getCanonicalDecl()->isInlined())
398          continue;
399 
400        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
401        auto *Entry = CGM.GetGlobalValue(Name);
402        // This checks whether the virtual inline function has already been
403        // emitted.  Note that such a function might only be emitted after we
404        // try to emit the vtable speculatively; because of this we do an extra
405        // pass after emitting all deferred vtables, to find and emit these
406        // vtables opportunistically.
407        if (!Entry || Entry->isDeclaration())
408          return true;
409      }
410      return false;
411   }
412 
413   bool isVTableHidden(const CXXRecordDecl *RD) const {
414     const auto &VtableLayout =
415             CGM.getItaniumVTableContext().getVTableLayout(RD);
416 
417     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
418       if (VtableComponent.isRTTIKind()) {
419         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
420         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
421           return true;
422       } else if (VtableComponent.isUsedFunctionPointerKind()) {
423         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
424         if (Method->getVisibility() == Visibility::HiddenVisibility &&
425             !Method->isDefined())
426           return true;
427       }
428     }
429     return false;
430   }
431 };
432 
433 class ARMCXXABI : public ItaniumCXXABI {
434 public:
435   ARMCXXABI(CodeGen::CodeGenModule &CGM) :
436     ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
437                   /* UseARMGuardVarABI = */ true) {}
438 
439   bool HasThisReturn(GlobalDecl GD) const override {
440     return (isa<CXXConstructorDecl>(GD.getDecl()) || (
441               isa<CXXDestructorDecl>(GD.getDecl()) &&
442               GD.getDtorType() != Dtor_Deleting));
443   }
444 
445   void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
446                            QualType ResTy) override;
447 
448   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
449   Address InitializeArrayCookie(CodeGenFunction &CGF,
450                                 Address NewPtr,
451                                 llvm::Value *NumElements,
452                                 const CXXNewExpr *expr,
453                                 QualType ElementType) override;
454   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
455                                    CharUnits cookieSize) override;
456 };
457 
458 class iOS64CXXABI : public ARMCXXABI {
459 public:
460   iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
461     Use32BitVTableOffsetABI = true;
462   }
463 
464   // ARM64 libraries are prepared for non-unique RTTI.
465   bool shouldRTTIBeUnique() const override { return false; }
466 };
467 
468 class WebAssemblyCXXABI final : public ItaniumCXXABI {
469 public:
470   explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
471       : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
472                       /*UseARMGuardVarABI=*/true) {}
473   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
474 
475 private:
476   bool HasThisReturn(GlobalDecl GD) const override {
477     return isa<CXXConstructorDecl>(GD.getDecl()) ||
478            (isa<CXXDestructorDecl>(GD.getDecl()) &&
479             GD.getDtorType() != Dtor_Deleting);
480   }
481   bool canCallMismatchedFunctionType() const override { return false; }
482 };
483 }
484 
485 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
486   switch (CGM.getTarget().getCXXABI().getKind()) {
487   // For IR-generation purposes, there's no significant difference
488   // between the ARM and iOS ABIs.
489   case TargetCXXABI::GenericARM:
490   case TargetCXXABI::iOS:
491   case TargetCXXABI::WatchOS:
492     return new ARMCXXABI(CGM);
493 
494   case TargetCXXABI::iOS64:
495     return new iOS64CXXABI(CGM);
496 
497   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
498   // include the other 32-bit ARM oddities: constructor/destructor return values
499   // and array cookies.
500   case TargetCXXABI::GenericAArch64:
501     return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
502                              /* UseARMGuardVarABI = */ true);
503 
504   case TargetCXXABI::GenericMIPS:
505     return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
506 
507   case TargetCXXABI::WebAssembly:
508     return new WebAssemblyCXXABI(CGM);
509 
510   case TargetCXXABI::GenericItanium:
511     if (CGM.getContext().getTargetInfo().getTriple().getArch()
512         == llvm::Triple::le32) {
513       // For PNaCl, use ARM-style method pointers so that PNaCl code
514       // does not assume anything about the alignment of function
515       // pointers.
516       return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
517                                /* UseARMGuardVarABI = */ false);
518     }
519     return new ItaniumCXXABI(CGM);
520 
521   case TargetCXXABI::Microsoft:
522     llvm_unreachable("Microsoft ABI is not Itanium-based");
523   }
524   llvm_unreachable("bad ABI kind");
525 }
526 
527 llvm::Type *
528 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
529   if (MPT->isMemberDataPointer())
530     return CGM.PtrDiffTy;
531   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
532 }
533 
534 /// In the Itanium and ARM ABIs, method pointers have the form:
535 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
536 ///
537 /// In the Itanium ABI:
538 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
539 ///  - the this-adjustment is (memptr.adj)
540 ///  - the virtual offset is (memptr.ptr - 1)
541 ///
542 /// In the ARM ABI:
543 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
544 ///  - the this-adjustment is (memptr.adj >> 1)
545 ///  - the virtual offset is (memptr.ptr)
546 /// ARM uses 'adj' for the virtual flag because Thumb functions
547 /// may be only single-byte aligned.
548 ///
549 /// If the member is virtual, the adjusted 'this' pointer points
550 /// to a vtable pointer from which the virtual offset is applied.
551 ///
552 /// If the member is non-virtual, memptr.ptr is the address of
553 /// the function to call.
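///
/// As an illustration only (a hypothetical class and a 64-bit layout, not
/// anything this function itself emits), given
///   struct A { virtual void f(); void g(); };
/// with A::f in vtable slot 0, the member pointers look like:
///   Itanium:  &A::f == { ptr = 0*8 + 1, adj = 0 }        // odd ptr => virtual
///             &A::g == { ptr = (ptrdiff_t)&A::g, adj = 0 }
///   ARM:      &A::f == { ptr = 0*8,     adj = 0*2 + 1 }   // odd adj => virtual
///             &A::g == { ptr = (ptrdiff_t)&A::g, adj = 0*2 }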
554 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
555     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
556     llvm::Value *&ThisPtrForCall,
557     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
558   CGBuilderTy &Builder = CGF.Builder;
559 
560   const FunctionProtoType *FPT =
561     MPT->getPointeeType()->getAs<FunctionProtoType>();
562   const CXXRecordDecl *RD =
563     cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
564 
565   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
566       CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
567 
568   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
569 
570   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
571   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
572   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
573 
574   // Extract memptr.adj, which is in the second field.
575   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
576 
577   // Compute the true adjustment.
578   llvm::Value *Adj = RawAdj;
579   if (UseARMMethodPtrABI)
580     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
581 
582   // Apply the adjustment and cast back to the original struct type
583   // for consistency.
584   llvm::Value *This = ThisAddr.getPointer();
585   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
586   Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
587   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
588   ThisPtrForCall = This;
589 
590   // Load the function pointer.
591   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
592 
593   // If the LSB in the function pointer is 1, the function pointer points to
594   // a virtual function.
595   llvm::Value *IsVirtual;
596   if (UseARMMethodPtrABI)
597     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
598   else
599     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
600   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
601   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
602 
603   // In the virtual path, the adjustment left 'This' pointing to the
604   // vtable of the correct base subobject.  The "function pointer" is an
605   // offset within the vtable (+1 for the virtual flag on non-ARM).
606   CGF.EmitBlock(FnVirtual);
607 
608   // Cast the adjusted this to a pointer to vtable pointer and load.
609   llvm::Type *VTableTy = Builder.getInt8PtrTy();
610   CharUnits VTablePtrAlign =
611     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
612                                       CGF.getPointerAlign());
613   llvm::Value *VTable =
614     CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
615 
616   // Apply the offset.
617   // On ARM64, to reserve extra space in virtual member function pointers,
618   // we only pay attention to the low 32 bits of the offset.
619   llvm::Value *VTableOffset = FnAsInt;
620   if (!UseARMMethodPtrABI)
621     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
622   if (Use32BitVTableOffsetABI) {
623     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
624     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
625   }
626   // Compute the address of the virtual function pointer.
627   llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
628 
629   // Check the address of the function pointer if CFI on member function
630   // pointers is enabled.
631   llvm::Constant *CheckSourceLocation;
632   llvm::Constant *CheckTypeDesc;
633   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
634                             CGM.HasHiddenLTOVisibility(RD);
635   if (ShouldEmitCFICheck) {
636     CodeGenFunction::SanitizerScope SanScope(&CGF);
637 
638     CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
639     CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
640     llvm::Constant *StaticData[] = {
641         llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
642         CheckSourceLocation,
643         CheckTypeDesc,
644     };
645 
646     llvm::Metadata *MD =
647         CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
648     llvm::Value *TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
649 
650     llvm::Value *TypeTest = Builder.CreateCall(
651         CGM.getIntrinsic(llvm::Intrinsic::type_test), {VFPAddr, TypeId});
652 
653     if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
654       CGF.EmitTrapCheck(TypeTest);
655     } else {
656       llvm::Value *AllVtables = llvm::MetadataAsValue::get(
657           CGM.getLLVMContext(),
658           llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
659       llvm::Value *ValidVtable = Builder.CreateCall(
660           CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
661       CGF.EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIMFCall),
662                     SanitizerHandler::CFICheckFail, StaticData,
663                     {VTable, ValidVtable});
664     }
665 
666     FnVirtual = Builder.GetInsertBlock();
667   }
668 
669   // Load the virtual function to call.
670   VFPAddr = Builder.CreateBitCast(VFPAddr, FTy->getPointerTo()->getPointerTo());
671   llvm::Value *VirtualFn = Builder.CreateAlignedLoad(
672       VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
673   CGF.EmitBranch(FnEnd);
674 
675   // In the non-virtual path, memptr.ptr already holds the function's
676   // address; just reinterpret it as a function pointer.
677   CGF.EmitBlock(FnNonVirtual);
678   llvm::Value *NonVirtualFn =
679     Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
680 
681   // Check the function pointer if CFI on member function pointers is enabled.
682   if (ShouldEmitCFICheck) {
683     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
684     if (RD->hasDefinition()) {
685       CodeGenFunction::SanitizerScope SanScope(&CGF);
686 
687       llvm::Constant *StaticData[] = {
688           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
689           CheckSourceLocation,
690           CheckTypeDesc,
691       };
692 
693       llvm::Value *Bit = Builder.getFalse();
694       llvm::Value *CastedNonVirtualFn =
695           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
696       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
697         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
698             getContext().getMemberPointerType(
699                 MPT->getPointeeType(),
700                 getContext().getRecordType(Base).getTypePtr()));
701         llvm::Value *TypeId =
702             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
703 
704         llvm::Value *TypeTest =
705             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
706                                {CastedNonVirtualFn, TypeId});
707         Bit = Builder.CreateOr(Bit, TypeTest);
708       }
709 
710       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
711                     SanitizerHandler::CFICheckFail, StaticData,
712                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
713 
714       FnNonVirtual = Builder.GetInsertBlock();
715     }
716   }
717 
718   // We're done.
719   CGF.EmitBlock(FnEnd);
720   llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
721   CalleePtr->addIncoming(VirtualFn, FnVirtual);
722   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
723 
724   CGCallee Callee(FPT, CalleePtr);
725   return Callee;
726 }
727 
728 /// Compute an l-value by applying the given pointer-to-member to a
729 /// base object.
730 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
731     CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
732     const MemberPointerType *MPT) {
733   assert(MemPtr->getType() == CGM.PtrDiffTy);
734 
735   CGBuilderTy &Builder = CGF.Builder;
736 
737   // Cast to char*.
738   Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
739 
740   // Apply the offset, which we assume is non-null.
741   llvm::Value *Addr =
742     Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
743 
744   // Cast the address to the appropriate pointer type, adopting the
745   // address space of the base pointer.
746   llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
747                             ->getPointerTo(Base.getAddressSpace());
748   return Builder.CreateBitCast(Addr, PType);
749 }
750 
751 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
752 /// conversion.
753 ///
754 /// Bitcast conversions are always a no-op under Itanium.
755 ///
756 /// Obligatory offset/adjustment diagram:
757 ///         <-- offset -->          <-- adjustment -->
758 ///   |--------------------------|----------------------|--------------------|
759 ///   ^Derived address point     ^Base address point    ^Member address point
760 ///
761 /// So when converting a base member pointer to a derived member pointer,
762 /// we add the offset to the adjustment because the address point has
763 /// decreased;  and conversely, when converting a derived MP to a base MP
764 /// we subtract the offset from the adjustment because the address point
765 /// has increased.
766 ///
767 /// The standard forbids (at compile time) conversion to and from
768 /// virtual bases, which is why we don't have to consider them here.
769 ///
770 /// The standard forbids (at run time) casting a derived MP to a base
771 /// MP when the derived MP does not point to a member of the base.
772 /// This is why -1 is a reasonable choice for null data member
773 /// pointers.
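///
/// A small worked example (hypothetical layout, for illustration only): if a
/// non-virtual Base subobject lives at offset 8 within Derived, converting
/// `int Base::*` to `int Derived::*` adds 8 to any non-null offset, and
/// converting `void (Base::*)()` to `void (Derived::*)()` adds 8 to
/// memptr.adj (16 under the ARM scheme, where adj is shifted left by one).
/// The derived-to-base direction subtracts instead.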
774 llvm::Value *
775 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
776                                            const CastExpr *E,
777                                            llvm::Value *src) {
778   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
779          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
780          E->getCastKind() == CK_ReinterpretMemberPointer);
781 
782   // Under Itanium, reinterprets don't require any additional processing.
783   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
784 
785   // Use constant emission if we can.
786   if (isa<llvm::Constant>(src))
787     return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
788 
789   llvm::Constant *adj = getMemberPointerAdjustment(E);
790   if (!adj) return src;
791 
792   CGBuilderTy &Builder = CGF.Builder;
793   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
794 
795   const MemberPointerType *destTy =
796     E->getType()->castAs<MemberPointerType>();
797 
798   // For member data pointers, this is just a matter of adding the
799   // offset if the source is non-null.
800   if (destTy->isMemberDataPointer()) {
801     llvm::Value *dst;
802     if (isDerivedToBase)
803       dst = Builder.CreateNSWSub(src, adj, "adj");
804     else
805       dst = Builder.CreateNSWAdd(src, adj, "adj");
806 
807     // Null check.
808     llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
809     llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
810     return Builder.CreateSelect(isNull, src, dst);
811   }
812 
813   // The this-adjustment is left-shifted by 1 on ARM.
814   if (UseARMMethodPtrABI) {
815     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
816     offset <<= 1;
817     adj = llvm::ConstantInt::get(adj->getType(), offset);
818   }
819 
820   llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
821   llvm::Value *dstAdj;
822   if (isDerivedToBase)
823     dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
824   else
825     dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
826 
827   return Builder.CreateInsertValue(src, dstAdj, 1);
828 }
829 
830 llvm::Constant *
831 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
832                                            llvm::Constant *src) {
833   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
834          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
835          E->getCastKind() == CK_ReinterpretMemberPointer);
836 
837   // Under Itanium, reinterprets don't require any additional processing.
838   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
839 
840   // If the adjustment is trivial, we don't need to do anything.
841   llvm::Constant *adj = getMemberPointerAdjustment(E);
842   if (!adj) return src;
843 
844   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
845 
846   const MemberPointerType *destTy =
847     E->getType()->castAs<MemberPointerType>();
848 
849   // For member data pointers, this is just a matter of adding the
850   // offset if the source is non-null.
851   if (destTy->isMemberDataPointer()) {
852     // null maps to null.
853     if (src->isAllOnesValue()) return src;
854 
855     if (isDerivedToBase)
856       return llvm::ConstantExpr::getNSWSub(src, adj);
857     else
858       return llvm::ConstantExpr::getNSWAdd(src, adj);
859   }
860 
861   // The this-adjustment is left-shifted by 1 on ARM.
862   if (UseARMMethodPtrABI) {
863     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
864     offset <<= 1;
865     adj = llvm::ConstantInt::get(adj->getType(), offset);
866   }
867 
868   llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
869   llvm::Constant *dstAdj;
870   if (isDerivedToBase)
871     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
872   else
873     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
874 
875   return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
876 }
877 
878 llvm::Constant *
879 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
880   // Itanium C++ ABI 2.3:
881   //   A NULL pointer is represented as -1.
882   if (MPT->isMemberDataPointer())
883     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
884 
885   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
886   llvm::Constant *Values[2] = { Zero, Zero };
887   return llvm::ConstantStruct::getAnon(Values);
888 }
889 
890 llvm::Constant *
891 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
892                                      CharUnits offset) {
893   // Itanium C++ ABI 2.3:
894   //   A pointer to data member is an offset from the base address of
895   //   the class object containing it, represented as a ptrdiff_t
896   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
897 }
898 
899 llvm::Constant *
900 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
901   return BuildMemberPointer(MD, CharUnits::Zero());
902 }
903 
904 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
905                                                   CharUnits ThisAdjustment) {
906   assert(MD->isInstance() && "Member function must not be static!");
907 
908   CodeGenTypes &Types = CGM.getTypes();
909 
910   // Get the function pointer (or index if this is a virtual function).
911   llvm::Constant *MemPtr[2];
912   if (MD->isVirtual()) {
913     uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
914 
915     const ASTContext &Context = getContext();
916     CharUnits PointerWidth =
917       Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
918     uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
919 
920     if (UseARMMethodPtrABI) {
921       // ARM C++ ABI 3.2.1:
922       //   This ABI specifies that adj contains twice the this
923       //   adjustment, plus 1 if the member function is virtual. The
924       //   least significant bit of adj then makes exactly the same
925       //   discrimination as the least significant bit of ptr does for
926       //   Itanium.
927       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
928       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
929                                          2 * ThisAdjustment.getQuantity() + 1);
930     } else {
931       // Itanium C++ ABI 2.3:
932       //   For a virtual function, [the pointer field] is 1 plus the
933       //   virtual table offset (in bytes) of the function,
934       //   represented as a ptrdiff_t.
935       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
936       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
937                                          ThisAdjustment.getQuantity());
938     }
939   } else {
940     const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
941     llvm::Type *Ty;
942     // Check whether the function has a computable LLVM signature.
943     if (Types.isFuncTypeConvertible(FPT)) {
944       // The function has a computable LLVM signature; use the correct type.
945       Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
946     } else {
947       // Use an arbitrary non-function type to tell GetAddrOfFunction that the
948       // function type is incomplete.
949       Ty = CGM.PtrDiffTy;
950     }
951     llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
952 
953     MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
954     MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
955                                        (UseARMMethodPtrABI ? 2 : 1) *
956                                        ThisAdjustment.getQuantity());
957   }
958 
959   return llvm::ConstantStruct::getAnon(MemPtr);
960 }
961 
962 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
963                                                  QualType MPType) {
964   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
965   const ValueDecl *MPD = MP.getMemberPointerDecl();
966   if (!MPD)
967     return EmitNullMemberPointer(MPT);
968 
969   CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
970 
971   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
972     return BuildMemberPointer(MD, ThisAdjustment);
973 
974   CharUnits FieldOffset =
975     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
976   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
977 }
978 
979 /// The comparison algorithm is pretty easy: the member pointers are
980 /// the same if they're either bitwise identical *or* both null.
981 ///
982 /// ARM is different here only because null-ness is more complicated.
983 llvm::Value *
984 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
985                                            llvm::Value *L,
986                                            llvm::Value *R,
987                                            const MemberPointerType *MPT,
988                                            bool Inequality) {
989   CGBuilderTy &Builder = CGF.Builder;
990 
991   llvm::ICmpInst::Predicate Eq;
992   llvm::Instruction::BinaryOps And, Or;
993   if (Inequality) {
994     Eq = llvm::ICmpInst::ICMP_NE;
995     And = llvm::Instruction::Or;
996     Or = llvm::Instruction::And;
997   } else {
998     Eq = llvm::ICmpInst::ICMP_EQ;
999     And = llvm::Instruction::And;
1000     Or = llvm::Instruction::Or;
1001   }
1002 
1003   // Member data pointers are easy because there's a unique null
1004   // value, so it just comes down to bitwise equality.
1005   if (MPT->isMemberDataPointer())
1006     return Builder.CreateICmp(Eq, L, R);
1007 
1008   // For member function pointers, the tautologies are more complex.
1009   // The Itanium tautology is:
1010   //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1011   // The ARM tautology is:
1012   //   (L == R) <==> (L.ptr == R.ptr &&
1013   //                  (L.adj == R.adj ||
1014   //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1015   // The inequality tautologies have exactly the same structure, except
1016   // applying De Morgan's laws.
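  // For instance, under the Itanium rule {ptr = &X::f, adj = 0} and
  // {ptr = &X::f, adj = 8} compare unequal (same ptr, different adj),
  // while two null values {ptr = 0, adj = a} and {ptr = 0, adj = b}
  // compare equal regardless of adj.  (Illustrative values only.)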
1017 
1018   llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1019   llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1020 
1021   // This condition tests whether L.ptr == R.ptr.  This must always be
1022   // true for equality to hold.
1023   llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1024 
1025   // This condition, together with the assumption that L.ptr == R.ptr,
1026   // tests whether the pointers are both null.  ARM imposes an extra
1027   // condition.
1028   llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1029   llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1030 
1031   // This condition tests whether L.adj == R.adj.  If this isn't
1032   // true, the pointers are unequal unless they're both null.
1033   llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1034   llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1035   llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1036 
1037   // Null member function pointers on ARM clear the low bit of Adj,
1038   // so the zero condition has to check that neither low bit is set.
1039   if (UseARMMethodPtrABI) {
1040     llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1041 
1042     // Compute (l.adj | r.adj) & 1 and test it against zero.
1043     llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1044     llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1045     llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1046                                                       "cmp.or.adj");
1047     EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1048   }
1049 
1050   // Tie together all our conditions.
1051   llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1052   Result = Builder.CreateBinOp(And, PtrEq, Result,
1053                                Inequality ? "memptr.ne" : "memptr.eq");
1054   return Result;
1055 }
1056 
1057 llvm::Value *
1058 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1059                                           llvm::Value *MemPtr,
1060                                           const MemberPointerType *MPT) {
1061   CGBuilderTy &Builder = CGF.Builder;
1062 
1063   // For member data pointers, this is just a check against -1.
1064   if (MPT->isMemberDataPointer()) {
1065     assert(MemPtr->getType() == CGM.PtrDiffTy);
1066     llvm::Value *NegativeOne =
1067       llvm::Constant::getAllOnesValue(MemPtr->getType());
1068     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1069   }
1070 
1071   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1072   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1073 
1074   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1075   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1076 
1077   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1078   // (the virtual bit) is set.
1079   if (UseARMMethodPtrABI) {
1080     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1081     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1082     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1083     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1084                                                   "memptr.isvirtual");
1085     Result = Builder.CreateOr(Result, IsVirtual);
1086   }
1087 
1088   return Result;
1089 }
1090 
1091 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1092   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1093   if (!RD)
1094     return false;
1095 
1096   // If C++ prohibits us from making a copy, return by address.
1097   if (passClassIndirect(RD)) {
1098     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1099     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1100     return true;
1101   }
1102   return false;
1103 }
1104 
1105 /// The Itanium ABI requires non-zero initialization only for data
1106 /// member pointers, for which '0' is a valid offset.
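/// For example, a pointer to the first member of a hypothetical plain
/// `struct S { int x; }` is encoded as offset 0, which is a perfectly valid
/// non-null value, so null has to be -1 and all-zero memory does not
/// represent a null data member pointer.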
1107 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1108   return MPT->isMemberFunctionPointer();
1109 }
1110 
1111 /// The Itanium ABI always places an offset to the complete object
1112 /// at entry -2 in the vtable.
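/// (In the usual Itanium vtable layout the address point follows the
/// offset-to-top and RTTI slots, so indexing the vptr with -2 reads the
/// offset-to-top field, i.e. the displacement from this subobject back to
/// the complete object.)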
1113 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1114                                             const CXXDeleteExpr *DE,
1115                                             Address Ptr,
1116                                             QualType ElementType,
1117                                             const CXXDestructorDecl *Dtor) {
1118   bool UseGlobalDelete = DE->isGlobalDelete();
1119   if (UseGlobalDelete) {
1120     // Derive the complete-object pointer, which is what we need
1121     // to pass to the deallocation function.
1122 
1123     // Grab the vtable pointer as an intptr_t*.
1124     auto *ClassDecl =
1125         cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
1126     llvm::Value *VTable =
1127         CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1128 
1129     // Track back to entry -2 and pull out the offset there.
1130     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1131         VTable, -2, "complete-offset.ptr");
1132     llvm::Value *Offset =
1133       CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1134 
1135     // Apply the offset.
1136     llvm::Value *CompletePtr =
1137       CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1138     CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1139 
1140     // If we're supposed to call the global delete, make sure we do so
1141     // even if the destructor throws.
1142     CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1143                                     ElementType);
1144   }
1145 
1146   // FIXME: Provide a source location here even though there's no
1147   // CXXMemberCallExpr for dtor call.
1148   CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1149   EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
1150 
1151   if (UseGlobalDelete)
1152     CGF.PopCleanupBlock();
1153 }
1154 
1155 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1156   // void __cxa_rethrow();
1157 
1158   llvm::FunctionType *FTy =
1159     llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
1160 
1161   llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1162 
1163   if (isNoReturn)
1164     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1165   else
1166     CGF.EmitRuntimeCallOrInvoke(Fn);
1167 }
1168 
1169 static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
1170   // void *__cxa_allocate_exception(size_t thrown_size);
1171 
1172   llvm::FunctionType *FTy =
1173     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
1174 
1175   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1176 }
1177 
1178 static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
1179   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1180   //                  void (*dest) (void *));
1181 
1182   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1183   llvm::FunctionType *FTy =
1184     llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
1185 
1186   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1187 }
1188 
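// For reference, a `throw T(...)` where T has a non-trivial destructor
// lowers to IR roughly like this (a sketch; names are illustrative):
//   %exn = call i8* @__cxa_allocate_exception(i64 sizeof(T))
//   ; ... construct the T object in place at %exn ...
//   call void @__cxa_throw(i8* %exn, i8* <typeinfo for T>, i8* <T's complete dtor>)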
1189 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1190   QualType ThrowType = E->getSubExpr()->getType();
1191   // Now allocate the exception object.
1192   llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1193   uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1194 
1195   llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
1196   llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1197       AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1198 
1199   CharUnits ExnAlign = getAlignmentOfExnObject();
1200   CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1201 
1202   // Now throw the exception.
1203   llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1204                                                          /*ForEH=*/true);
1205 
1206   // The address of the destructor.  If the exception type has a
1207   // trivial destructor (or isn't a record), we just pass null.
1208   llvm::Constant *Dtor = nullptr;
1209   if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1210     CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1211     if (!Record->hasTrivialDestructor()) {
1212       CXXDestructorDecl *DtorD = Record->getDestructor();
1213       Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
1214       Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1215     }
1216   }
1217   if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1218 
1219   llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1220   CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1221 }
1222 
1223 static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1224   // void *__dynamic_cast(const void *sub,
1225   //                      const abi::__class_type_info *src,
1226   //                      const abi::__class_type_info *dst,
1227   //                      std::ptrdiff_t src2dst_offset);
1228 
1229   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1230   llvm::Type *PtrDiffTy =
1231     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1232 
1233   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1234 
1235   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1236 
1237   // Mark the function as nounwind readonly.
1238   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1239                                             llvm::Attribute::ReadOnly };
1240   llvm::AttributeList Attrs = llvm::AttributeList::get(
1241       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1242 
1243   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1244 }
1245 
1246 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1247   // void __cxa_bad_cast();
1248   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1249   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1250 }
1251 
1252 /// Compute the src2dst_offset hint as described in the
1253 /// Itanium C++ ABI [2.9.7]
1254 static CharUnits computeOffsetHint(ASTContext &Context,
1255                                    const CXXRecordDecl *Src,
1256                                    const CXXRecordDecl *Dst) {
1257   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1258                      /*DetectVirtual=*/false);
1259 
1260   // If Dst is not derived from Src we can skip the whole computation below and
1261   // return that Src is not a public base of Dst.  Record all inheritance paths.
1262   if (!Dst->isDerivedFrom(Src, Paths))
1263     return CharUnits::fromQuantity(-2ULL);
1264 
1265   unsigned NumPublicPaths = 0;
1266   CharUnits Offset;
1267 
1268   // Now walk all possible inheritance paths.
1269   for (const CXXBasePath &Path : Paths) {
1270     if (Path.Access != AS_public)  // Ignore non-public inheritance.
1271       continue;
1272 
1273     ++NumPublicPaths;
1274 
1275     for (const CXXBasePathElement &PathElement : Path) {
1276       // If the path contains a virtual base class we can't give any hint.
1277       // -1: no hint.
1278       if (PathElement.Base->isVirtual())
1279         return CharUnits::fromQuantity(-1ULL);
1280 
1281       if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1282         continue;
1283 
1284       // Accumulate the base class offsets.
1285       const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1286       Offset += L.getBaseClassOffset(
1287           PathElement.Base->getType()->getAsCXXRecordDecl());
1288     }
1289   }
1290 
1291   // -2: Src is not a public base of Dst.
1292   if (NumPublicPaths == 0)
1293     return CharUnits::fromQuantity(-2ULL);
1294 
1295   // -3: Src is a multiple public base type but never a virtual base type.
1296   if (NumPublicPaths > 1)
1297     return CharUnits::fromQuantity(-3ULL);
1298 
1299   // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1300   // Return the offset of Src from the origin of Dst.
1301   return Offset;
1302 }
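
// Illustrative example of the hint values (assumed layouts, not computed
// here): given
//   struct A { virtual ~A(); };
//   struct B { virtual ~B(); };
//   struct C : A, B { };
// a dynamic_cast from B* to C* gets the static offset of the B subobject
// within C (typically one vptr past A), while ambiguous, non-public, or
// virtual bases fall back to the special values -1/-2/-3 documented above.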
1303 
1304 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1305   // void __cxa_bad_typeid();
1306   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1307 
1308   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1309 }
1310 
1311 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1312                                               QualType SrcRecordTy) {
1313   return IsDeref;
1314 }
1315 
1316 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1317   llvm::Value *Fn = getBadTypeidFn(CGF);
1318   CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1319   CGF.Builder.CreateUnreachable();
1320 }
1321 
1322 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1323                                        QualType SrcRecordTy,
1324                                        Address ThisPtr,
1325                                        llvm::Type *StdTypeInfoPtrTy) {
1326   auto *ClassDecl =
1327       cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1328   llvm::Value *Value =
1329       CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1330 
1331   // Load the type info.
1332   Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1333   return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
1334 }
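
// Conceptually this relies on the Itanium layout rule that the
// std::type_info pointer lives in the slot immediately before the vtable
// address point, i.e. roughly:
//   std::type_info *ti = ((std::type_info **)vptr)[-1];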
1335 
1336 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1337                                                        QualType SrcRecordTy) {
1338   return SrcIsPtr;
1339 }
1340 
1341 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1342     CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1343     QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1344   llvm::Type *PtrDiffLTy =
1345       CGF.ConvertType(CGF.getContext().getPointerDiffType());
1346   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1347 
1348   llvm::Value *SrcRTTI =
1349       CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1350   llvm::Value *DestRTTI =
1351       CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1352 
1353   // Compute the offset hint.
1354   const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1355   const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1356   llvm::Value *OffsetHint = llvm::ConstantInt::get(
1357       PtrDiffLTy,
1358       computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1359 
1360   // Emit the call to __dynamic_cast.
1361   llvm::Value *Value = ThisAddr.getPointer();
1362   Value = CGF.EmitCastToVoidPtr(Value);
1363 
1364   llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1365   Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1366   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1367 
1368   /// C++ [expr.dynamic.cast]p9:
1369   ///   A failed cast to reference type throws std::bad_cast
1370   if (DestTy->isReferenceType()) {
1371     llvm::BasicBlock *BadCastBlock =
1372         CGF.createBasicBlock("dynamic_cast.bad_cast");
1373 
1374     llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1375     CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1376 
1377     CGF.EmitBlock(BadCastBlock);
1378     EmitBadCastCall(CGF);
1379   }
1380 
1381   return Value;
1382 }
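
// A minimal sketch of what the emitted code amounts to (names illustrative):
//
//   void *result = __dynamic_cast(ptr, &typeid(Src), &typeid(Dst), hint);
//   if (result == nullptr && /* cast target is a reference type */ true)
//     __cxa_bad_cast();   // does not return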
1383 
1384 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1385                                                   Address ThisAddr,
1386                                                   QualType SrcRecordTy,
1387                                                   QualType DestTy) {
1388   llvm::Type *PtrDiffLTy =
1389       CGF.ConvertType(CGF.getContext().getPointerDiffType());
1390   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1391 
1392   auto *ClassDecl =
1393       cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1394   // Get the vtable pointer.
1395   llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
1396       ClassDecl);
1397 
1398   // Get the offset-to-top from the vtable.
1399   llvm::Value *OffsetToTop =
1400       CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1401   OffsetToTop =
1402     CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
1403                                   "offset.to.top");
1404 
1405   // Finally, add the offset to the pointer.
1406   llvm::Value *Value = ThisAddr.getPointer();
1407   Value = CGF.EmitCastToVoidPtr(Value);
1408   Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1409 
1410   return CGF.Builder.CreateBitCast(Value, DestLTy);
1411 }
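
// Roughly equivalent pseudo-code for the cast to void*: the offset-to-top
// entry sits two slots before the address point:
//   ptrdiff_t off = ((ptrdiff_t *)vptr)[-2];
//   void *complete = (char *)ptr + off;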
1412 
1413 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1414   llvm::Value *Fn = getBadCastFn(CGF);
1415   CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1416   CGF.Builder.CreateUnreachable();
1417   return true;
1418 }
1419 
1420 llvm::Value *
1421 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1422                                          Address This,
1423                                          const CXXRecordDecl *ClassDecl,
1424                                          const CXXRecordDecl *BaseClassDecl) {
1425   llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1426   CharUnits VBaseOffsetOffset =
1427       CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1428                                                                BaseClassDecl);
1429 
1430   llvm::Value *VBaseOffsetPtr =
1431     CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1432                                    "vbase.offset.ptr");
1433   VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1434                                              CGM.PtrDiffTy->getPointerTo());
1435 
1436   llvm::Value *VBaseOffset =
1437     CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
1438                                   "vbase.offset");
1439 
1440   return VBaseOffset;
1441 }
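
// In effect this reads a virtual base offset entry stored at a small
// (typically negative) byte offset from the vtable address point, roughly:
//   ptrdiff_t vbase_off = *(ptrdiff_t *)((char *)vptr + VBaseOffsetOffset);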
1442 
1443 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1444   // Just make sure we're in sync with TargetCXXABI.
1445   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1446 
1447   // The constructor used for constructing this as a base class;
1448   // ignores virtual bases.
1449   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1450 
1451   // The constructor used for constructing this as a complete class;
1452   // constructs the virtual bases, then calls the base constructor.
1453   if (!D->getParent()->isAbstract()) {
1454     // We don't need to emit the complete ctor if the class is abstract.
1455     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1456   }
1457 }
1458 
1459 CGCXXABI::AddedStructorArgs
1460 ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
1461                                       SmallVectorImpl<CanQualType> &ArgTys) {
1462   ASTContext &Context = getContext();
1463 
1464   // All parameters are already in place except VTT, which goes after 'this'.
1465   // These are Clang types, so we don't need to worry about sret yet.
1466 
1467   // Check if we need to add a VTT parameter (which has type void **).
1468   if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0) {
1469     ArgTys.insert(ArgTys.begin() + 1,
1470                   Context.getPointerType(Context.VoidPtrTy));
1471     return AddedStructorArgs::prefix(1);
1472   }
1473   return AddedStructorArgs{};
1474 }
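
// For example (assumed class, illustrative signatures only): for
//   struct D : virtual B { D(); };
// the base-object constructor (C2) is effectively 'D(D *this, void **vtt)',
// while the complete-object constructor (C1) takes no VTT parameter.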
1475 
1476 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1477   // The destructor used for destructing this as a base class; ignores
1478   // virtual bases.
1479   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1480 
1481   // The destructor used for destructing this as a most-derived class;
1482   // calls the base destructor and then destroys any virtual bases.
1483   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1484 
1485   // The destructor in a virtual table is always a 'deleting'
1486   // destructor, which calls the complete destructor and then uses the
1487   // appropriate operator delete.
1488   if (D->isVirtual())
1489     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1490 }
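
// In Itanium mangling these variants correspond to, e.g. for a class X
// (illustrative symbol names):
//   _ZN1XD2Ev  base-object destructor
//   _ZN1XD1Ev  complete-object destructor
//   _ZN1XD0Ev  deleting destructor (only for virtual destructors)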
1491 
1492 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1493                                               QualType &ResTy,
1494                                               FunctionArgList &Params) {
1495   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1496   assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1497 
1498   // Check if we need a VTT parameter as well.
1499   if (NeedsVTTParameter(CGF.CurGD)) {
1500     ASTContext &Context = getContext();
1501 
1502     // FIXME: avoid the fake decl
1503     QualType T = Context.getPointerType(Context.VoidPtrTy);
1504     auto *VTTDecl = ImplicitParamDecl::Create(
1505         Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1506         T, ImplicitParamDecl::CXXVTT);
1507     Params.insert(Params.begin() + 1, VTTDecl);
1508     getStructorImplicitParamDecl(CGF) = VTTDecl;
1509   }
1510 }
1511 
1512 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1513   // Naked functions have no prolog.
1514   if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1515     return;
1516 
1517   /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1518   /// adjustments are required, because they are all handled by thunks.
1519   setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1520 
1521   /// Initialize the 'vtt' slot if needed.
1522   if (getStructorImplicitParamDecl(CGF)) {
1523     getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1524         CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1525   }
1526 
1527   /// If this is a function that the ABI specifies returns 'this', initialize
1528   /// the return slot to 'this' at the start of the function.
1529   ///
1530   /// Unlike the setting of return types, this is done within the ABI
1531   /// implementation instead of by clients of CGCXXABI because:
1532   /// 1) getThisValue is currently protected
1533   /// 2) in theory, an ABI could implement 'this' returns some other way;
1534   ///    HasThisReturn only specifies a contract, not the implementation
1535   if (HasThisReturn(CGF.CurGD))
1536     CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1537 }
1538 
1539 CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
1540     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1541     bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1542   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1543     return AddedStructorArgs{};
1544 
1545   // Insert the implicit 'vtt' argument as the second argument.
1546   llvm::Value *VTT =
1547       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1548   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1549   Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
1550   return AddedStructorArgs::prefix(1);  // Added one arg.
1551 }
1552 
1553 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1554                                        const CXXDestructorDecl *DD,
1555                                        CXXDtorType Type, bool ForVirtualBase,
1556                                        bool Delegating, Address This) {
1557   GlobalDecl GD(DD, Type);
1558   llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1559   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1560 
1561   CGCallee Callee;
1562   if (getContext().getLangOpts().AppleKext &&
1563       Type != Dtor_Base && DD->isVirtual())
1564     Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1565   else
1566     Callee = CGCallee::forDirect(
1567         CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)), GD);
1568 
1569   CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
1570                                   This.getPointer(), VTT, VTTTy,
1571                                   nullptr, nullptr);
1572 }
1573 
1574 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1575                                           const CXXRecordDecl *RD) {
1576   llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1577   if (VTable->hasInitializer())
1578     return;
1579 
1580   ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1581   const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1582   llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1583   llvm::Constant *RTTI =
1584       CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1585 
1586   // Create and set the initializer.
1587   ConstantInitBuilder Builder(CGM);
1588   auto Components = Builder.beginStruct();
1589   CGVT.createVTableInitializer(Components, VTLayout, RTTI);
1590   Components.finishAndSetAsInitializer(VTable);
1591 
1592   // Set the correct linkage.
1593   VTable->setLinkage(Linkage);
1594 
1595   if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1596     VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1597 
1598   // Set the right visibility.
1599   CGM.setGVProperties(VTable, RD);
1600 
1601   // If this is the magic class __cxxabiv1::__fundamental_type_info,
1602   // we will emit the typeinfo for the fundamental types. This is the
1603   // same behaviour as GCC.
1604   const DeclContext *DC = RD->getDeclContext();
1605   if (RD->getIdentifier() &&
1606       RD->getIdentifier()->isStr("__fundamental_type_info") &&
1607       isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1608       cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1609       DC->getParent()->isTranslationUnit())
1610     EmitFundamentalRTTIDescriptors(RD);
1611 
1612   if (!VTable->isDeclarationForLinker())
1613     CGM.EmitVTableTypeMetadata(VTable, VTLayout);
1614 }
1615 
1616 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1617     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1618   if (Vptr.NearestVBase == nullptr)
1619     return false;
1620   return NeedsVTTParameter(CGF.CurGD);
1621 }
1622 
1623 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1624     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1625     const CXXRecordDecl *NearestVBase) {
1626 
1627   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1628       NeedsVTTParameter(CGF.CurGD)) {
1629     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1630                                                   NearestVBase);
1631   }
1632   return getVTableAddressPoint(Base, VTableClass);
1633 }
1634 
1635 llvm::Constant *
1636 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1637                                      const CXXRecordDecl *VTableClass) {
1638   llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1639 
1640   // Find the appropriate vtable within the vtable group, and the address point
1641   // within that vtable.
1642   VTableLayout::AddressPointLocation AddressPoint =
1643       CGM.getItaniumVTableContext()
1644           .getVTableLayout(VTableClass)
1645           .getAddressPoint(Base);
1646   llvm::Value *Indices[] = {
1647     llvm::ConstantInt::get(CGM.Int32Ty, 0),
1648     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1649     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1650   };
1651 
1652   return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1653                                               Indices, /*InBounds=*/true,
1654                                               /*InRangeIndex=*/1);
1655 }
1656 
1657 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1658     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1659     const CXXRecordDecl *NearestVBase) {
1660   assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1661          NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1662 
1663   // Get the secondary vpointer index.
1664   uint64_t VirtualPointerIndex =
1665       CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1666 
1667   /// Load the VTT.
1668   llvm::Value *VTT = CGF.LoadCXXVTT();
1669   if (VirtualPointerIndex)
1670     VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1671 
1672   // And load the address point from the VTT.
1673   return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
1674 }
1675 
1676 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1677     BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1678   return getVTableAddressPoint(Base, VTableClass);
1679 }
1680 
1681 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1682                                                      CharUnits VPtrOffset) {
1683   assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1684 
1685   llvm::GlobalVariable *&VTable = VTables[RD];
1686   if (VTable)
1687     return VTable;
1688 
1689   // Queue up this vtable for possible deferred emission.
1690   CGM.addDeferredVTable(RD);
1691 
1692   SmallString<256> Name;
1693   llvm::raw_svector_ostream Out(Name);
1694   getMangleContext().mangleCXXVTable(RD, Out);
1695 
1696   const VTableLayout &VTLayout =
1697       CGM.getItaniumVTableContext().getVTableLayout(RD);
1698   llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1699 
1700   // Use pointer alignment for the vtable. Otherwise we would align them based
1701   // on the size of the initializer, which doesn't make sense because only
1702   // individual values are read from it.
1703   unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1704 
1705   VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1706       Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1707       getContext().toCharUnitsFromBits(PAlign).getQuantity());
1708   VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1709 
1710   CGM.setGVProperties(VTable, RD);
1711 
1712   return VTable;
1713 }
1714 
1715 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1716                                                   GlobalDecl GD,
1717                                                   Address This,
1718                                                   llvm::Type *Ty,
1719                                                   SourceLocation Loc) {
1720   Ty = Ty->getPointerTo()->getPointerTo();
1721   auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1722   llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
1723 
1724   uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1725   llvm::Value *VFunc;
1726   if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1727     VFunc = CGF.EmitVTableTypeCheckedLoad(
1728         MethodDecl->getParent(), VTable,
1729         VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1730   } else {
1731     CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1732 
1733     llvm::Value *VFuncPtr =
1734         CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1735     auto *VFuncLoad =
1736         CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
1737 
1738     // Add !invariant.load metadata to the virtual function load to indicate
1739     // that the function pointer cannot change once the vtable has been built.
1740     // It is safe to add this even without -fstrict-vtable-pointers, but it
1741     // only helps devirtualization when two loads of the same virtual function
1742     // come from the same vtable load, which does not happen unless
1743     // devirtualization is enabled with -fstrict-vtable-pointers.
1744     if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1745         CGM.getCodeGenOpts().StrictVTablePointers)
1746       VFuncLoad->setMetadata(
1747           llvm::LLVMContext::MD_invariant_load,
1748           llvm::MDNode::get(CGM.getLLVMContext(),
1749                             llvm::ArrayRef<llvm::Metadata *>()));
1750     VFunc = VFuncLoad;
1751   }
1752 
1753   CGCallee Callee(GD, VFunc);
1754   return Callee;
1755 }
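
// The non-checked path above is morally the classic virtual dispatch
// sequence (sketch, ignoring type metadata and CFI; types illustrative):
//   void **vtable = *(void ***)thisPtr;
//   auto fn = (Ret (*)(T *, Args...))vtable[VTableIndex];
//   // the caller then emits: fn(thisPtr, args...);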
1756 
1757 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1758     CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1759     Address This, const CXXMemberCallExpr *CE) {
1760   assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1761   assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1762 
1763   const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
1764       Dtor, getFromDtorType(DtorType));
1765   llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1766   CGCallee Callee =
1767       CGCallee::forVirtual(CE, GlobalDecl(Dtor, DtorType), This, Ty);
1768 
1769   CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
1770                                   This.getPointer(), /*ImplicitParam=*/nullptr,
1771                                   QualType(), CE, nullptr);
1772   return nullptr;
1773 }
1774 
1775 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1776   CodeGenVTables &VTables = CGM.getVTables();
1777   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1778   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
1779 }
1780 
1781 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
1782     const CXXRecordDecl *RD) const {
1783   // We don't emit available_externally vtables if we are in -fapple-kext mode
1784   // because kext mode does not permit devirtualization.
1785   if (CGM.getLangOpts().AppleKext)
1786     return false;
1787 
1788   // If the vtable is hidden then it is not safe to emit an available_externally
1789   // copy of the vtable.
1790   if (isVTableHidden(RD))
1791     return false;
1792 
1793   if (CGM.getCodeGenOpts().ForceEmitVTables)
1794     return true;
1795 
1796   // If there are no inline virtual functions that have not yet been emitted,
1797   // it is safe to emit an available_externally copy of the vtable.
1798   // FIXME: we can still emit a copy of the vtable if we can emit the
1799   // definitions of the inline functions.
1800   if (hasAnyUnusedVirtualInlineFunction(RD))
1801     return false;
1802 
1803   // For a class with virtual bases, we must also be able to speculatively
1804   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
1805   // the vtable" and "can emit the VTT". For a base subobject, this means we
1806   // need to be able to emit non-virtual base vtables.
1807   if (RD->getNumVBases()) {
1808     for (const auto &B : RD->bases()) {
1809       auto *BRD = B.getType()->getAsCXXRecordDecl();
1810       assert(BRD && "no class for base specifier");
1811       if (B.isVirtual() || !BRD->isDynamicClass())
1812         continue;
1813       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
1814         return false;
1815     }
1816   }
1817 
1818   return true;
1819 }
1820 
1821 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1822   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
1823     return false;
1824 
1825   // For a complete-object vtable (or more specifically, for the VTT), we need
1826   // to be able to speculatively emit the vtables of all dynamic virtual bases.
1827   for (const auto &B : RD->vbases()) {
1828     auto *BRD = B.getType()->getAsCXXRecordDecl();
1829     assert(BRD && "no class for base specifier");
1830     if (!BRD->isDynamicClass())
1831       continue;
1832     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
1833       return false;
1834   }
1835 
1836   return true;
1837 }
1838 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1839                                           Address InitialPtr,
1840                                           int64_t NonVirtualAdjustment,
1841                                           int64_t VirtualAdjustment,
1842                                           bool IsReturnAdjustment) {
1843   if (!NonVirtualAdjustment && !VirtualAdjustment)
1844     return InitialPtr.getPointer();
1845 
1846   Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
1847 
1848   // In a base-to-derived cast, the non-virtual adjustment is applied first.
1849   if (NonVirtualAdjustment && !IsReturnAdjustment) {
1850     V = CGF.Builder.CreateConstInBoundsByteGEP(V,
1851                               CharUnits::fromQuantity(NonVirtualAdjustment));
1852   }
1853 
1854   // Perform the virtual adjustment if we have one.
1855   llvm::Value *ResultPtr;
1856   if (VirtualAdjustment) {
1857     llvm::Type *PtrDiffTy =
1858         CGF.ConvertType(CGF.getContext().getPointerDiffType());
1859 
1860     Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
1861     llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1862 
1863     llvm::Value *OffsetPtr =
1864         CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1865 
1866     OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1867 
1868     // Load the adjustment offset from the vtable.
1869     llvm::Value *Offset =
1870       CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1871 
1872     // Adjust our pointer.
1873     ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
1874   } else {
1875     ResultPtr = V.getPointer();
1876   }
1877 
1878   // In a derived-to-base conversion, the non-virtual adjustment is
1879   // applied second.
1880   if (NonVirtualAdjustment && IsReturnAdjustment) {
1881     ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
1882                                                        NonVirtualAdjustment);
1883   }
1884 
1885   // Cast back to the original type.
1886   return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
1887 }
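
// A rough sketch of the full 'this' adjustment a thunk performs here
// (virtual case; names illustrative):
//   char *p = (char *)ptr + NonVirtualAdjustment;      // applied first
//   char *vtable = *(char **)p;
//   p += *(ptrdiff_t *)(vtable + VirtualAdjustment);   // vcall offset
// Return adjustments apply the non-virtual part after the virtual part.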
1888 
1889 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1890                                                   Address This,
1891                                                   const ThisAdjustment &TA) {
1892   return performTypeAdjustment(CGF, This, TA.NonVirtual,
1893                                TA.Virtual.Itanium.VCallOffsetOffset,
1894                                /*IsReturnAdjustment=*/false);
1895 }
1896 
1897 llvm::Value *
1898 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
1899                                        const ReturnAdjustment &RA) {
1900   return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1901                                RA.Virtual.Itanium.VBaseOffsetOffset,
1902                                /*IsReturnAdjustment=*/true);
1903 }
1904 
1905 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1906                                     RValue RV, QualType ResultType) {
1907   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1908     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1909 
1910   // Destructor thunks in the ARM ABI have indeterminate results.
1911   llvm::Type *T = CGF.ReturnValue.getElementType();
1912   RValue Undef = RValue::get(llvm::UndefValue::get(T));
1913   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1914 }
1915 
1916 /************************** Array allocation cookies **************************/
1917 
1918 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1919   // The array cookie is a size_t; pad that up to the element alignment.
1920   // The cookie is actually right-justified in that space.
1921   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1922                   CGM.getContext().getTypeAlignInChars(elementType));
1923 }
1924 
1925 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1926                                              Address NewPtr,
1927                                              llvm::Value *NumElements,
1928                                              const CXXNewExpr *expr,
1929                                              QualType ElementType) {
1930   assert(requiresArrayCookie(expr));
1931 
1932   unsigned AS = NewPtr.getAddressSpace();
1933 
1934   ASTContext &Ctx = getContext();
1935   CharUnits SizeSize = CGF.getSizeSize();
1936 
1937   // The size of the cookie.
1938   CharUnits CookieSize =
1939     std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1940   assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1941 
1942   // Compute an offset to the cookie.
1943   Address CookiePtr = NewPtr;
1944   CharUnits CookieOffset = CookieSize - SizeSize;
1945   if (!CookieOffset.isZero())
1946     CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
1947 
1948   // Write the number of elements into the appropriate slot.
1949   Address NumElementsPtr =
1950       CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
1951   llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1952 
1953   // Handle the array cookie specially in ASan.
1954   if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1955       (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
1956        CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
1957     // The store to the CookiePtr does not need to be instrumented.
1958     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1959     llvm::FunctionType *FTy =
1960         llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
1961     llvm::Constant *F =
1962         CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1963     CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
1964   }
1965 
1966   // Finally, compute a pointer to the actual data buffer by skipping
1967   // over the cookie completely.
1968   return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
1969 }
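
// Putting the pieces together, for 'new T[n]' with a non-trivially
// destructible T this amounts to roughly (sketch, assuming no ASan):
//   size_t cookie = std::max(sizeof(size_t), alignof(T));
//   char *raw = (char *)operator new[](n * sizeof(T) + cookie);
//   *(size_t *)(raw + cookie - sizeof(size_t)) = n;   // right-justified count
//   T *data = (T *)(raw + cookie);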
1970 
1971 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1972                                                 Address allocPtr,
1973                                                 CharUnits cookieSize) {
1974   // The number of elements is right-justified in the cookie.
1975   Address numElementsPtr = allocPtr;
1976   CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
1977   if (!numElementsOffset.isZero())
1978     numElementsPtr =
1979       CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
1980 
1981   unsigned AS = allocPtr.getAddressSpace();
1982   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1983   if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
1984     return CGF.Builder.CreateLoad(numElementsPtr);
1985   // In asan mode emit a function call instead of a regular load and let the
1986   // run-time deal with it: if the shadow is properly poisoned return the
1987   // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
1988   // We can't simply ignore this load using nosanitize metadata because
1989   // the metadata may be lost.
1990   llvm::FunctionType *FTy =
1991       llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
1992   llvm::Constant *F =
1993       CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
1994   return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
1995 }
1996 
1997 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1998   // ARM says that the cookie is always:
1999   //   struct array_cookie {
2000   //     std::size_t element_size; // element_size != 0
2001   //     std::size_t element_count;
2002   //   };
2003   // But the base ABI doesn't give anything an alignment greater than
2004   // 8, so we can dismiss this as typical ABI-author blindness to
2005   // actual language complexity and round up to the element alignment.
2006   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2007                   CGM.getContext().getTypeAlignInChars(elementType));
2008 }
2009 
2010 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2011                                          Address newPtr,
2012                                          llvm::Value *numElements,
2013                                          const CXXNewExpr *expr,
2014                                          QualType elementType) {
2015   assert(requiresArrayCookie(expr));
2016 
2017   // The cookie is always at the start of the buffer.
2018   Address cookie = newPtr;
2019 
2020   // The first element is the element size.
2021   cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2022   llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2023                  getContext().getTypeSizeInChars(elementType).getQuantity());
2024   CGF.Builder.CreateStore(elementSize, cookie);
2025 
2026   // The second element is the element count.
2027   cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
2028   CGF.Builder.CreateStore(numElements, cookie);
2029 
2030   // Finally, compute a pointer to the actual data buffer by skipping
2031   // over the cookie completely.
2032   CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2033   return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2034 }
2035 
2036 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2037                                             Address allocPtr,
2038                                             CharUnits cookieSize) {
2039   // The number of elements is at offset sizeof(size_t) relative to
2040   // the allocated pointer.
2041   Address numElementsPtr
2042     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2043 
2044   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2045   return CGF.Builder.CreateLoad(numElementsPtr);
2046 }
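
// i.e. with the ARM cookie layout described above, the count is simply
//   size_t n = ((size_t *)allocPtr)[1];   // [0] holds the element size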
2047 
2048 /*********************** Static local initialization **************************/
2049 
2050 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
2051                                          llvm::PointerType *GuardPtrTy) {
2052   // int __cxa_guard_acquire(__guard *guard_object);
2053   llvm::FunctionType *FTy =
2054     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2055                             GuardPtrTy, /*isVarArg=*/false);
2056   return CGM.CreateRuntimeFunction(
2057       FTy, "__cxa_guard_acquire",
2058       llvm::AttributeList::get(CGM.getLLVMContext(),
2059                                llvm::AttributeList::FunctionIndex,
2060                                llvm::Attribute::NoUnwind));
2061 }
2062 
2063 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
2064                                          llvm::PointerType *GuardPtrTy) {
2065   // void __cxa_guard_release(__guard *guard_object);
2066   llvm::FunctionType *FTy =
2067     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2068   return CGM.CreateRuntimeFunction(
2069       FTy, "__cxa_guard_release",
2070       llvm::AttributeList::get(CGM.getLLVMContext(),
2071                                llvm::AttributeList::FunctionIndex,
2072                                llvm::Attribute::NoUnwind));
2073 }
2074 
2075 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
2076                                        llvm::PointerType *GuardPtrTy) {
2077   // void __cxa_guard_abort(__guard *guard_object);
2078   llvm::FunctionType *FTy =
2079     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2080   return CGM.CreateRuntimeFunction(
2081       FTy, "__cxa_guard_abort",
2082       llvm::AttributeList::get(CGM.getLLVMContext(),
2083                                llvm::AttributeList::FunctionIndex,
2084                                llvm::Attribute::NoUnwind));
2085 }
2086 
2087 namespace {
2088   struct CallGuardAbort final : EHScopeStack::Cleanup {
2089     llvm::GlobalVariable *Guard;
2090     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2091 
2092     void Emit(CodeGenFunction &CGF, Flags flags) override {
2093       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2094                                   Guard);
2095     }
2096   };
2097 }
2098 
2099 /// The ARM code here follows the Itanium code closely enough that we
2100 /// just special-case it at particular places.
2101 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2102                                     const VarDecl &D,
2103                                     llvm::GlobalVariable *var,
2104                                     bool shouldPerformInit) {
2105   CGBuilderTy &Builder = CGF.Builder;
2106 
2107   // Inline variables that weren't instantiated from variable templates have
2108   // partially-ordered initialization within their translation unit.
2109   bool NonTemplateInline =
2110       D.isInline() &&
2111       !isTemplateInstantiation(D.getTemplateSpecializationKind());
2112 
2113   // We only need to use thread-safe statics for local non-TLS variables and
2114   // inline variables; other global initialization is always single-threaded
2115   // or (through lazy dynamic loading in multiple threads) unsequenced.
2116   bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2117                     (D.isLocalVarDecl() || NonTemplateInline) &&
2118                     !D.getTLSKind();
2119 
2120   // If we have a global variable with internal linkage and thread-safe statics
2121   // are disabled, we can just let the guard variable be of type i8.
2122   bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2123 
2124   llvm::IntegerType *guardTy;
2125   CharUnits guardAlignment;
2126   if (useInt8GuardVariable) {
2127     guardTy = CGF.Int8Ty;
2128     guardAlignment = CharUnits::One();
2129   } else {
2130     // Guard variables are 64 bits in the generic ABI and size width on ARM
2131     // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2132     if (UseARMGuardVarABI) {
2133       guardTy = CGF.SizeTy;
2134       guardAlignment = CGF.getSizeAlign();
2135     } else {
2136       guardTy = CGF.Int64Ty;
2137       guardAlignment = CharUnits::fromQuantity(
2138                              CGM.getDataLayout().getABITypeAlignment(guardTy));
2139     }
2140   }
2141   llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
2142 
2143   // Create the guard variable if we don't already have it (as we
2144   // might if we're double-emitting this function body).
2145   llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2146   if (!guard) {
2147     // Mangle the name for the guard.
2148     SmallString<256> guardName;
2149     {
2150       llvm::raw_svector_ostream out(guardName);
2151       getMangleContext().mangleStaticGuardVariable(&D, out);
2152     }
2153 
2154     // Create the guard variable with a zero-initializer.
2155     // Just absorb linkage and visibility from the guarded variable.
2156     guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2157                                      false, var->getLinkage(),
2158                                      llvm::ConstantInt::get(guardTy, 0),
2159                                      guardName.str());
2160     guard->setDSOLocal(var->isDSOLocal());
2161     guard->setVisibility(var->getVisibility());
2162     // If the variable is thread-local, so is its guard variable.
2163     guard->setThreadLocalMode(var->getThreadLocalMode());
2164     guard->setAlignment(guardAlignment.getQuantity());
2165 
2166     // The ABI says: "It is suggested that it be emitted in the same COMDAT
2167     // group as the associated data object." In practice, this doesn't work for
2168     // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2169     llvm::Comdat *C = var->getComdat();
2170     if (!D.isLocalVarDecl() && C &&
2171         (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2172          CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2173       guard->setComdat(C);
2174       // An inline variable's guard function is run from the per-TU
2175       // initialization function, not via a dedicated global ctor function, so
2176       // we can't put it in a comdat.
2177       if (!NonTemplateInline)
2178         CGF.CurFn->setComdat(C);
2179     } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2180       guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2181     }
2182 
2183     CGM.setStaticLocalDeclGuardAddress(&D, guard);
2184   }
2185 
2186   Address guardAddr = Address(guard, guardAlignment);
2187 
2188   // Test whether the variable has completed initialization.
2189   //
2190   // Itanium C++ ABI 3.3.2:
2191   //   The following is pseudo-code showing how these functions can be used:
2192   //     if (obj_guard.first_byte == 0) {
2193   //       if ( __cxa_guard_acquire (&obj_guard) ) {
2194   //         try {
2195   //           ... initialize the object ...;
2196   //         } catch (...) {
2197   //            __cxa_guard_abort (&obj_guard);
2198   //            throw;
2199   //         }
2200   //         ... queue object destructor with __cxa_atexit() ...;
2201   //         __cxa_guard_release (&obj_guard);
2202   //       }
2203   //     }
2204 
2205   // Load the first byte of the guard variable.
2206   llvm::LoadInst *LI =
2207       Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2208 
2209   // Itanium ABI:
2210   //   An implementation supporting thread-safety on multiprocessor
2211   //   systems must also guarantee that references to the initialized
2212   //   object do not occur before the load of the initialization flag.
2213   //
2214   // In LLVM, we do this by marking the load Acquire.
2215   if (threadsafe)
2216     LI->setAtomic(llvm::AtomicOrdering::Acquire);
2217 
2218   // For ARM, we should only check the first bit, rather than the entire byte:
2219   //
2220   // ARM C++ ABI 3.2.3.1:
2221   //   To support the potential use of initialization guard variables
2222   //   as semaphores that are the target of ARM SWP and LDREX/STREX
2223   //   synchronizing instructions we define a static initialization
2224   //   guard variable to be a 4-byte aligned, 4-byte word with the
2225   //   following inline access protocol.
2226   //     #define INITIALIZED 1
2227   //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
2228   //       if (__cxa_guard_acquire(&obj_guard))
2229   //         ...
2230   //     }
2231   //
2232   // and similarly for ARM64:
2233   //
2234   // ARM64 C++ ABI 3.2.2:
2235   //   This ABI instead only specifies the value bit 0 of the static guard
2236   //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
2237   //   variable is not initialized and 1 when it is.
2238   llvm::Value *V =
2239       (UseARMGuardVarABI && !useInt8GuardVariable)
2240           ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2241           : LI;
2242   llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2243 
2244   llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2245   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2246 
2247   // Check if the first byte of the guard variable is zero.
2248   CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2249                                CodeGenFunction::GuardKind::VariableGuard, &D);
2250 
2251   CGF.EmitBlock(InitCheckBlock);
2252 
2253   // Variables used when coping with thread-safe statics and exceptions.
2254   if (threadsafe) {
2255     // Call __cxa_guard_acquire.
2256     llvm::Value *V
2257       = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2258 
2259     llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2260 
2261     Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2262                          InitBlock, EndBlock);
2263 
2264     // Call __cxa_guard_abort along the exceptional edge.
2265     CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2266 
2267     CGF.EmitBlock(InitBlock);
2268   }
2269 
2270   // Emit the initializer and add a global destructor if appropriate.
2271   CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2272 
2273   if (threadsafe) {
2274     // Pop the guard-abort cleanup if we pushed one.
2275     CGF.PopCleanupBlock();
2276 
2277     // Call __cxa_guard_release.  This cannot throw.
2278     CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2279                                 guardAddr.getPointer());
2280   } else {
2281     Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2282   }
2283 
2284   CGF.EmitBlock(EndBlock);
2285 }
2286 
2287 /// Register a global destructor using __cxa_atexit.
2288 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2289                                         llvm::Constant *dtor,
2290                                         llvm::Constant *addr,
2291                                         bool TLS) {
2292   const char *Name = "__cxa_atexit";
2293   if (TLS) {
2294     const llvm::Triple &T = CGF.getTarget().getTriple();
2295     Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
2296   }
2297 
2298   // We're assuming that the destructor function is something we can
2299   // reasonably call with the default CC.  Go ahead and cast it to the
2300   // right prototype.
2301   llvm::Type *dtorTy =
2302     llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2303 
2304   // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2305   llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
2306   llvm::FunctionType *atexitTy =
2307     llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2308 
2309   // Fetch the actual function.
2310   llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2311   if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
2312     fn->setDoesNotThrow();
2313 
2314   // Create a variable that binds the atexit to this shared object.
2315   llvm::Constant *handle =
2316       CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2317   auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2318   GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2319 
2320   if (!addr)
2321     // addr is null when we are trying to register a dtor annotated with
2322     // __attribute__((destructor)) in a constructor function. Using null here is
2323     // okay because this argument is just passed back to the destructor
2324     // function.
2325     addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2326 
2327   llvm::Value *args[] = {
2328     llvm::ConstantExpr::getBitCast(dtor, dtorTy),
2329     llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
2330     handle
2331   };
2332   CGF.EmitNounwindRuntimeCall(atexit, args);
2333 }
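
// The registered call is effectively (sketch; '_tlv_atexit' or
// '__cxa_thread_atexit' replace '__cxa_atexit' for TLS destructors):
//   extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
//   __cxa_atexit((void (*)(void *))&dtor, &object, &__dso_handle);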
2334 
2335 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2336   for (const auto I : DtorsUsingAtExit) {
2337     int Priority = I.first;
2338     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2339 
2340     // Create a function that registers destructors that have the same priority.
2341     //
2342     // Since constructor functions are run in non-descending order of their
2343     // priorities, destructors are registered in non-descending order of their
2344     // priorities, and since destructor functions are run in the reverse order
2345     // of their registration, destructor functions are run in non-ascending
2346     // order of their priorities.
2347     CodeGenFunction CGF(*this);
2348     std::string GlobalInitFnName =
2349         std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2350     llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
2351     llvm::Function *GlobalInitFn = CreateGlobalInitOrDestructFunction(
2352         FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
2353         SourceLocation());
2354     ASTContext &Ctx = getContext();
2355     QualType ReturnTy = Ctx.VoidTy;
2356     QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
2357     FunctionDecl *FD = FunctionDecl::Create(
2358         Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
2359         &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
2360         false, false);
2361     CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
2362                       getTypes().arrangeNullaryFunction(), FunctionArgList(),
2363                       SourceLocation(), SourceLocation());
2364 
2365     for (auto *Dtor : Dtors) {
2366       // Register the destructor function calling __cxa_atexit if it is
2367       // available. Otherwise fall back on calling atexit.
2368       if (getCodeGenOpts().CXAAtExit)
2369         emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2370       else
2371         CGF.registerGlobalDtorWithAtExit(Dtor);
2372     }
2373 
2374     CGF.FinishFunction();
2375     AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2376   }
2377 }
2378 
2379 /// Register a global destructor as best as we know how.
2380 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
2381                                        const VarDecl &D,
2382                                        llvm::Constant *dtor,
2383                                        llvm::Constant *addr) {
2384   if (D.isNoDestroy(CGM.getContext()))
2385     return;
2386 
2387   // Use __cxa_atexit if available.
2388   if (CGM.getCodeGenOpts().CXAAtExit)
2389     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2390 
2391   if (D.getTLSKind())
2392     CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
2393 
2394   // In Apple kexts, we want to add a global destructor entry.
2395   // FIXME: shouldn't this be guarded by some variable?
2396   if (CGM.getLangOpts().AppleKext) {
2397     // Generate a global destructor entry.
2398     return CGM.AddCXXDtorEntry(dtor, addr);
2399   }
2400 
2401   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2402 }
2403 
2404 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2405                                        CodeGen::CodeGenModule &CGM) {
2406   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2407   // Darwin prefers that references to thread local variables go through the
2408   // thread wrapper instead of directly referencing the backing variable.
2409   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2410          CGM.getTarget().getTriple().isOSDarwin();
2411 }
2412 
2413 /// Get the appropriate linkage for the wrapper function. This is essentially
2414 /// the weak form of the variable's linkage; every translation unit which needs
2415 /// the wrapper emits a copy, and we want the linker to merge them.
2416 static llvm::GlobalValue::LinkageTypes
2417 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2418   llvm::GlobalValue::LinkageTypes VarLinkage =
2419       CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
2420 
2421   // For internal linkage variables, we don't need an external or weak wrapper.
2422   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2423     return VarLinkage;
2424 
2425   // If the thread wrapper is replaceable, give it appropriate linkage.
2426   if (isThreadWrapperReplaceable(VD, CGM))
2427     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2428         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2429       return VarLinkage;
2430   return llvm::GlobalValue::WeakODRLinkage;
2431 }
2432 
2433 llvm::Function *
2434 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2435                                              llvm::Value *Val) {
2436   // Mangle the name for the thread_local wrapper function.
2437   SmallString<256> WrapperName;
2438   {
2439     llvm::raw_svector_ostream Out(WrapperName);
2440     getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2441   }
2442 
2443   // FIXME: If VD is a definition, we should regenerate the function attributes
2444   // before returning.
2445   if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2446     return cast<llvm::Function>(V);
2447 
2448   QualType RetQT = VD->getType();
2449   if (RetQT->isReferenceType())
2450     RetQT = RetQT.getNonReferenceType();
2451 
2452   const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2453       getContext().getPointerType(RetQT), FunctionArgList());
2454 
2455   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2456   llvm::Function *Wrapper =
2457       llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2458                              WrapperName.str(), &CGM.getModule());
2459 
2460   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);
2461 
2462   if (VD->hasDefinition())
2463     CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2464 
2465   // Always resolve references to the wrapper at link time.
2466   if (!Wrapper->hasLocalLinkage())
2467     if (!isThreadWrapperReplaceable(VD, CGM) ||
2468         llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2469         llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2470         VD->getVisibility() == HiddenVisibility)
2471       Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2472 
2473   if (isThreadWrapperReplaceable(VD, CGM)) {
2474     Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2475     Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2476   }
2477   return Wrapper;
2478 }
2479 
2480 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2481     CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2482     ArrayRef<llvm::Function *> CXXThreadLocalInits,
2483     ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2484   llvm::Function *InitFunc = nullptr;
2485 
2486   // Separate initializers into those with ordered (or partially-ordered)
2487   // initialization and those with unordered initialization.
2488   llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2489   llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2490   for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2491     if (isTemplateInstantiation(
2492             CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2493       UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2494           CXXThreadLocalInits[I];
2495     else
2496       OrderedInits.push_back(CXXThreadLocalInits[I]);
2497   }
2498 
2499   if (!OrderedInits.empty()) {
2500     // Generate a guarded initialization function.
2501     llvm::FunctionType *FTy =
2502         llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2503     const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2504     InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
2505                                                       SourceLocation(),
2506                                                       /*TLS=*/true);
2507     llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2508         CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2509         llvm::GlobalVariable::InternalLinkage,
2510         llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2511     Guard->setThreadLocal(true);
2512 
2513     CharUnits GuardAlign = CharUnits::One();
2514     Guard->setAlignment(GuardAlign.getQuantity());
2515 
2516     CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2517         InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
2518     // On Darwin platforms, use CXX_FAST_TLS calling convention.
2519     if (CGM.getTarget().getTriple().isOSDarwin()) {
2520       InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2521       InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2522     }
2523   }
2524 
2525   // Emit thread wrappers.
2526   for (const VarDecl *VD : CXXThreadLocals) {
2527     llvm::GlobalVariable *Var =
2528         cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2529     llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
2530 
2531     // Some targets require that all accesses to thread local variables go
2532     // through the thread wrapper.  If this TU does not define the variable, we
2533     // cannot emit the wrapper body or an initialization helper here.
2534     if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition()) {
2535       Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2536       continue;
2537     }
2538 
2539     // Mangle the name for the thread_local initialization function.
2540     SmallString<256> InitFnName;
2541     {
2542       llvm::raw_svector_ostream Out(InitFnName);
2543       getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2544     }
2545 
2546     // If we have a definition for the variable, emit the initialization
2547     // function as an alias to the global Init function (if any). Otherwise,
2548     // produce a declaration of the initialization function.
2549     llvm::GlobalValue *Init = nullptr;
2550     bool InitIsInitFunc = false;
2551     if (VD->hasDefinition()) {
2552       InitIsInitFunc = true;
2553       llvm::Function *InitFuncToUse = InitFunc;
2554       if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2555         InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2556       if (InitFuncToUse)
2557         Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2558                                          InitFuncToUse);
2559     } else {
2560       // Emit a weak global function referring to the initialization function.
2561       // This function will not exist if the TU defining the thread_local
2562       // variable in question does not need any dynamic initialization for
2563       // its thread_local variables.
2564       llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2565       Init = llvm::Function::Create(FnTy,
2566                                     llvm::GlobalVariable::ExternalWeakLinkage,
2567                                     InitFnName.str(), &CGM.getModule());
2568       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2569       CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2570                                     cast<llvm::Function>(Init));
2571     }
2572 
2573     if (Init) {
2574       Init->setVisibility(Var->getVisibility());
2575       Init->setDSOLocal(Var->isDSOLocal());
2576     }
2577 
2578     llvm::LLVMContext &Context = CGM.getModule().getContext();
2579     llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2580     CGBuilderTy Builder(CGM, Entry);
2581     if (InitIsInitFunc) {
2582       if (Init) {
2583         llvm::CallInst *CallVal = Builder.CreateCall(Init);
2584         if (isThreadWrapperReplaceable(VD, CGM)) {
2585           CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2586           llvm::Function *Fn =
2587               cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2588           Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2589         }
2590       }
2591     } else {
2592       // Don't know whether we have an init function. Call it if it exists.
2593       llvm::Value *Have = Builder.CreateIsNotNull(Init);
2594       llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2595       llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2596       Builder.CreateCondBr(Have, InitBB, ExitBB);
2597 
2598       Builder.SetInsertPoint(InitBB);
2599       Builder.CreateCall(Init);
2600       Builder.CreateBr(ExitBB);
2601 
2602       Builder.SetInsertPoint(ExitBB);
2603     }
2604 
2605     // For a reference, the result of the wrapper function is a pointer to
2606     // the referenced object.
2607     llvm::Value *Val = Var;
2608     if (VD->getType()->isReferenceType()) {
2609       CharUnits Align = CGM.getContext().getDeclAlign(VD);
2610       Val = Builder.CreateAlignedLoad(Val, Align);
2611     }
2612     if (Val->getType() != Wrapper->getReturnType())
2613       Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2614           Val, Wrapper->getReturnType(), "");
2615     Builder.CreateRet(Val);
2616   }
2617 }
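// A worked example of the symbols this produces, assuming standard Itanium
// mangling and a hypothetical namespace-scope variable (not from this file):
//
//   thread_local int tls_var = compute();
//
// Its ordered initializer is placed in __tls_init (guarded by __tls_guard), a
// per-variable init symbol _ZTH7tls_var is emitted as an alias of __tls_init,
// and a wrapper _ZTW7tls_var is emitted for other TUs to call; the wrapper runs
// _ZTH7tls_var if it is defined and then returns the address of tls_var.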
2618 
2619 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2620                                                    const VarDecl *VD,
2621                                                    QualType LValType) {
2622   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2623   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2624 
2625   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2626   CallVal->setCallingConv(Wrapper->getCallingConv());
2627 
2628   LValue LV;
2629   if (VD->getType()->isReferenceType())
2630     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2631   else
2632     LV = CGF.MakeAddrLValue(CallVal, LValType,
2633                             CGF.getContext().getDeclAlign(VD));
2634   // FIXME: need setObjCGCLValueClass?
2635   return LV;
2636 }
2637 
2638 /// Return whether the given global decl needs a VTT parameter, which it does
2639 /// if it's a base constructor or destructor with virtual bases.
2640 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2641   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2642 
2643   // If we don't have any virtual bases, just return early.
2644   if (!MD->getParent()->getNumVBases())
2645     return false;
2646 
2647   // Check if we have a base constructor.
2648   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2649     return true;
2650 
2651   // Check if we have a base destructor.
2652   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2653     return true;
2654 
2655   return false;
2656 }
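// For example, with hypothetical types
//
//   struct A { };
//   struct B : virtual A { B(); ~B(); };
//
// the base-object constructor and destructor of B (the C2/D2 variants) each
// take a VTT parameter because B has a virtual base, while the complete-object
// variants (C1/D1) do not.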
2657 
2658 namespace {
2659 class ItaniumRTTIBuilder {
2660   CodeGenModule &CGM;  // Per-module state.
2661   llvm::LLVMContext &VMContext;
2662   const ItaniumCXXABI &CXXABI;  // The C++ ABI being targeted.
2663 
2664   /// Fields - The fields of the RTTI descriptor currently being built.
2665   SmallVector<llvm::Constant *, 16> Fields;
2666 
2667   /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2668   llvm::GlobalVariable *
2669   GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2670 
2671   /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2672   /// descriptor of the given type.
2673   llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2674 
2675   /// BuildVTablePointer - Build the vtable pointer for the given type.
2676   void BuildVTablePointer(const Type *Ty);
2677 
2678   /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2679   /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2680   void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2681 
2682   /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2683   /// classes with bases that do not satisfy the abi::__si_class_type_info
2684   /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
2685   void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2686 
2687   /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2688   /// for pointer types.
2689   void BuildPointerTypeInfo(QualType PointeeTy);
2690 
2691   /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2692   /// type_info for an object type.
2693   void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2694 
2695   /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2696   /// struct, used for member pointer types.
2697   void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2698 
2699 public:
2700   ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2701       : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2702 
2703   // Pointer type info flags.
2704   enum {
2705     /// PTI_Const - Type has const qualifier.
2706     PTI_Const = 0x1,
2707 
2708     /// PTI_Volatile - Type has volatile qualifier.
2709     PTI_Volatile = 0x2,
2710 
2711     /// PTI_Restrict - Type has restrict qualifier.
2712     PTI_Restrict = 0x4,
2713 
2714     /// PTI_Incomplete - Type is incomplete.
2715     PTI_Incomplete = 0x8,
2716 
2717     /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2718     /// (in pointer to member).
2719     PTI_ContainingClassIncomplete = 0x10,
2720 
2721     /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
2722     //PTI_TransactionSafe = 0x20,
2723 
2724     /// PTI_Noexcept - Pointee is noexcept function (C++1z).
2725     PTI_Noexcept = 0x40,
2726   };
2727 
2728   // VMI type info flags.
2729   enum {
2730     /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2731     VMI_NonDiamondRepeat = 0x1,
2732 
2733     /// VMI_DiamondShaped - Class is diamond shaped.
2734     VMI_DiamondShaped = 0x2
2735   };
2736 
2737   // Base class type info flags.
2738   enum {
2739     /// BCTI_Virtual - Base class is virtual.
2740     BCTI_Virtual = 0x1,
2741 
2742     /// BCTI_Public - Base class is public.
2743     BCTI_Public = 0x2
2744   };
2745 
2746   /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
2747   /// link to an existing RTTI descriptor if one already exists.
2748   llvm::Constant *BuildTypeInfo(QualType Ty);
2749 
2750   /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2751   llvm::Constant *BuildTypeInfo(
2752       QualType Ty,
2753       llvm::GlobalVariable::LinkageTypes Linkage,
2754       llvm::GlobalValue::VisibilityTypes Visibility,
2755       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
2756 };
2757 }
2758 
2759 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2760     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2761   SmallString<256> Name;
2762   llvm::raw_svector_ostream Out(Name);
2763   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2764 
2765   // We know that the mangled name of the type starts at index 4 of the
2766   // mangled name of the typename, so we can just index into it in order to
2767   // get the mangled name of the type.
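  // For instance, under standard Itanium mangling a hypothetical 'struct Foo'
  // has the type-name symbol _ZTS3Foo; dropping the 4-character "_ZTS" prefix
  // leaves "3Foo", the mangled name of the type itself, which is the string
  // stored below.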
2768   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2769                                                             Name.substr(4));
2770   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
2771 
2772   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
2773       Name, Init->getType(), Linkage, Align.getQuantity());
2774 
2775   GV->setInitializer(Init);
2776 
2777   return GV;
2778 }
2779 
2780 llvm::Constant *
2781 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2782   // Mangle the RTTI name.
2783   SmallString<256> Name;
2784   llvm::raw_svector_ostream Out(Name);
2785   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2786 
2787   // Look for an existing global.
2788   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2789 
2790   if (!GV) {
2791     // Create a new global variable.
2792     // Note for the future: If we would ever like to do deferred emission of
2793     // RTTI, check whether emitting vtables opportunistically needs any adjustment.
2794 
2795     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2796                                   /*Constant=*/true,
2797                                   llvm::GlobalValue::ExternalLinkage, nullptr,
2798                                   Name);
2799     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
2800     CGM.setGVProperties(GV, RD);
2801   }
2802 
2803   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2804 }
2805 
2806 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2807 /// info for that type is defined in the standard library.
2808 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2809   // Itanium C++ ABI 2.9.2:
2810   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
2811   //   the run-time support library. Specifically, the run-time support
2812   //   library should contain type_info objects for the types X, X* and
2813   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2814   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
2815   //   long, unsigned long, long long, unsigned long long, float, double,
2816   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
2817   //   half-precision floating point types.
2818   //
2819   // GCC also emits RTTI for __int128.
2820   // FIXME: We do not emit RTTI information for decimal types here.
2821 
2822   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
2823   switch (Ty->getKind()) {
2824     case BuiltinType::Void:
2825     case BuiltinType::NullPtr:
2826     case BuiltinType::Bool:
2827     case BuiltinType::WChar_S:
2828     case BuiltinType::WChar_U:
2829     case BuiltinType::Char_U:
2830     case BuiltinType::Char_S:
2831     case BuiltinType::UChar:
2832     case BuiltinType::SChar:
2833     case BuiltinType::Short:
2834     case BuiltinType::UShort:
2835     case BuiltinType::Int:
2836     case BuiltinType::UInt:
2837     case BuiltinType::Long:
2838     case BuiltinType::ULong:
2839     case BuiltinType::LongLong:
2840     case BuiltinType::ULongLong:
2841     case BuiltinType::Half:
2842     case BuiltinType::Float:
2843     case BuiltinType::Double:
2844     case BuiltinType::LongDouble:
2845     case BuiltinType::Float16:
2846     case BuiltinType::Float128:
2847     case BuiltinType::Char8:
2848     case BuiltinType::Char16:
2849     case BuiltinType::Char32:
2850     case BuiltinType::Int128:
2851     case BuiltinType::UInt128:
2852       return true;
2853 
2854 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2855     case BuiltinType::Id:
2856 #include "clang/Basic/OpenCLImageTypes.def"
2857 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2858     case BuiltinType::Id:
2859 #include "clang/Basic/OpenCLExtensionTypes.def"
2860     case BuiltinType::OCLSampler:
2861     case BuiltinType::OCLEvent:
2862     case BuiltinType::OCLClkEvent:
2863     case BuiltinType::OCLQueue:
2864     case BuiltinType::OCLReserveID:
2865     case BuiltinType::ShortAccum:
2866     case BuiltinType::Accum:
2867     case BuiltinType::LongAccum:
2868     case BuiltinType::UShortAccum:
2869     case BuiltinType::UAccum:
2870     case BuiltinType::ULongAccum:
2871     case BuiltinType::ShortFract:
2872     case BuiltinType::Fract:
2873     case BuiltinType::LongFract:
2874     case BuiltinType::UShortFract:
2875     case BuiltinType::UFract:
2876     case BuiltinType::ULongFract:
2877     case BuiltinType::SatShortAccum:
2878     case BuiltinType::SatAccum:
2879     case BuiltinType::SatLongAccum:
2880     case BuiltinType::SatUShortAccum:
2881     case BuiltinType::SatUAccum:
2882     case BuiltinType::SatULongAccum:
2883     case BuiltinType::SatShortFract:
2884     case BuiltinType::SatFract:
2885     case BuiltinType::SatLongFract:
2886     case BuiltinType::SatUShortFract:
2887     case BuiltinType::SatUFract:
2888     case BuiltinType::SatULongFract:
2889       return false;
2890 
2891     case BuiltinType::Dependent:
2892 #define BUILTIN_TYPE(Id, SingletonId)
2893 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2894     case BuiltinType::Id:
2895 #include "clang/AST/BuiltinTypes.def"
2896       llvm_unreachable("asking for RRTI for a placeholder type!");
2897 
2898     case BuiltinType::ObjCId:
2899     case BuiltinType::ObjCClass:
2900     case BuiltinType::ObjCSel:
2901       llvm_unreachable("FIXME: Objective-C types are unsupported!");
2902   }
2903 
2904   llvm_unreachable("Invalid BuiltinType Kind!");
2905 }
2906 
2907 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2908   QualType PointeeTy = PointerTy->getPointeeType();
2909   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2910   if (!BuiltinTy)
2911     return false;
2912 
2913   // Check the qualifiers.
2914   Qualifiers Quals = PointeeTy.getQualifiers();
2915   Quals.removeConst();
2916 
2917   if (!Quals.empty())
2918     return false;
2919 
2920   return TypeInfoIsInStandardLibrary(BuiltinTy);
2921 }
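// Concretely: 'int *' and 'const int *' match the runtime-provided set checked
// above, while 'volatile int *' does not -- only const is stripped before the
// emptiness check -- so its descriptor must be emitted by the compiler.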
2922 
2923 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2924 /// information for the given type exists in the standard library.
2925 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2926   // Type info for builtin types is defined in the standard library.
2927   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2928     return TypeInfoIsInStandardLibrary(BuiltinTy);
2929 
2930   // Type info for some pointer types to builtin types is defined in the
2931   // standard library.
2932   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2933     return TypeInfoIsInStandardLibrary(PointerTy);
2934 
2935   return false;
2936 }
2937 
2938 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2939 /// the given type exists somewhere else, and that we should not emit the type
2940 /// information in this translation unit.  Assumes that it is not a
2941 /// standard-library type.
2942 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2943                                             QualType Ty) {
2944   ASTContext &Context = CGM.getContext();
2945 
2946   // If RTTI is disabled, assume it might be disabled in the
2947   // translation unit that defines any potential key function, too.
2948   if (!Context.getLangOpts().RTTI) return false;
2949 
2950   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2951     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2952     if (!RD->hasDefinition())
2953       return false;
2954 
2955     if (!RD->isDynamicClass())
2956       return false;
2957 
2958     // FIXME: this may need to be reconsidered if the key function
2959     // changes.
2960     // N.B. We must always emit the RTTI data ourselves if there exists a key
2961     // function.
2962     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2963 
2964     // Don't import the RTTI but emit it locally.
2965     if (CGM.getTriple().isWindowsGNUEnvironment() && IsDLLImport)
2966       return false;
2967 
2968     if (CGM.getVTables().isVTableExternal(RD))
2969       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
2970                  ? false
2971                  : true;
2972 
2973     if (IsDLLImport)
2974       return true;
2975   }
2976 
2977   return false;
2978 }
2979 
2980 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2981 static bool IsIncompleteClassType(const RecordType *RecordTy) {
2982   return !RecordTy->getDecl()->isCompleteDefinition();
2983 }
2984 
2985 /// ContainsIncompleteClassType - Returns whether the given type contains an
2986 /// incomplete class type. This is true if
2987 ///
2988 ///   * The given type is an incomplete class type.
2989 ///   * The given type is a pointer type whose pointee type contains an
2990 ///     incomplete class type.
2991 ///   * The given type is a member pointer type whose class is an incomplete
2992 ///     class type.
2993 ///   * The given type is a member pointer type whose pointee type contains an
2994 ///     incomplete class type.
2995 /// That is, the type refers, directly or indirectly, to an incomplete class type.
2996 static bool ContainsIncompleteClassType(QualType Ty) {
2997   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2998     if (IsIncompleteClassType(RecordTy))
2999       return true;
3000   }
3001 
3002   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3003     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3004 
3005   if (const MemberPointerType *MemberPointerTy =
3006       dyn_cast<MemberPointerType>(Ty)) {
3007     // Check if the class type is incomplete.
3008     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3009     if (IsIncompleteClassType(ClassType))
3010       return true;
3011 
3012     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3013   }
3014 
3015   return false;
3016 }
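// Example of the cases above, with hypothetical declarations:
//
//   class Opaque;              // incomplete at this point
//   // 'Opaque *' and 'int Opaque::*' both contain an incomplete class type.
//
// Both answers being true later forces internal linkage on the corresponding
// RTTI objects (see getTypeInfoLinkage below).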
3017 
3018 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3019 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3020 // iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
3021 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3022   // Check the number of bases.
3023   if (RD->getNumBases() != 1)
3024     return false;
3025 
3026   // Get the base.
3027   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3028 
3029   // Check that the base is not virtual.
3030   if (Base->isVirtual())
3031     return false;
3032 
3033   // Check that the base is public.
3034   if (Base->getAccessSpecifier() != AS_public)
3035     return false;
3036 
3037   // Check that the class is dynamic iff the base is.
3038   const CXXRecordDecl *BaseDecl =
3039     cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3040   if (!BaseDecl->isEmpty() &&
3041       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3042     return false;
3043 
3044   return true;
3045 }
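// For example, a hypothetical 'struct D : B { ... }' with a single public,
// non-virtual base satisfies these checks and gets abi::__si_class_type_info,
// whereas 'struct D : B, C { ... }' or 'struct D : virtual B { ... }' does not
// and falls back to abi::__vmi_class_type_info in BuildVTablePointer below.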
3046 
3047 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3048   // abi::__class_type_info.
3049   static const char * const ClassTypeInfo =
3050     "_ZTVN10__cxxabiv117__class_type_infoE";
3051   // abi::__si_class_type_info.
3052   static const char * const SIClassTypeInfo =
3053     "_ZTVN10__cxxabiv120__si_class_type_infoE";
3054   // abi::__vmi_class_type_info.
3055   static const char * const VMIClassTypeInfo =
3056     "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3057 
3058   const char *VTableName = nullptr;
3059 
3060   switch (Ty->getTypeClass()) {
3061 #define TYPE(Class, Base)
3062 #define ABSTRACT_TYPE(Class, Base)
3063 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3064 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3065 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3066 #include "clang/AST/TypeNodes.def"
3067     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3068 
3069   case Type::LValueReference:
3070   case Type::RValueReference:
3071     llvm_unreachable("References shouldn't get here");
3072 
3073   case Type::Auto:
3074   case Type::DeducedTemplateSpecialization:
3075     llvm_unreachable("Undeduced type shouldn't get here");
3076 
3077   case Type::Pipe:
3078     llvm_unreachable("Pipe types shouldn't get here");
3079 
3080   case Type::Builtin:
3081   // GCC treats vector and complex types as fundamental types.
3082   case Type::Vector:
3083   case Type::ExtVector:
3084   case Type::Complex:
3085   case Type::Atomic:
3086   // FIXME: GCC treats block pointers as fundamental types?!
3087   case Type::BlockPointer:
3088     // abi::__fundamental_type_info.
3089     VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3090     break;
3091 
3092   case Type::ConstantArray:
3093   case Type::IncompleteArray:
3094   case Type::VariableArray:
3095     // abi::__array_type_info.
3096     VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3097     break;
3098 
3099   case Type::FunctionNoProto:
3100   case Type::FunctionProto:
3101     // abi::__function_type_info.
3102     VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3103     break;
3104 
3105   case Type::Enum:
3106     // abi::__enum_type_info.
3107     VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3108     break;
3109 
3110   case Type::Record: {
3111     const CXXRecordDecl *RD =
3112       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3113 
3114     if (!RD->hasDefinition() || !RD->getNumBases()) {
3115       VTableName = ClassTypeInfo;
3116     } else if (CanUseSingleInheritance(RD)) {
3117       VTableName = SIClassTypeInfo;
3118     } else {
3119       VTableName = VMIClassTypeInfo;
3120     }
3121 
3122     break;
3123   }
3124 
3125   case Type::ObjCObject:
3126     // Ignore protocol qualifiers.
3127     Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3128 
3129     // Handle id and Class.
3130     if (isa<BuiltinType>(Ty)) {
3131       VTableName = ClassTypeInfo;
3132       break;
3133     }
3134 
3135     assert(isa<ObjCInterfaceType>(Ty));
3136     LLVM_FALLTHROUGH;
3137 
3138   case Type::ObjCInterface:
3139     if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3140       VTableName = SIClassTypeInfo;
3141     } else {
3142       VTableName = ClassTypeInfo;
3143     }
3144     break;
3145 
3146   case Type::ObjCObjectPointer:
3147   case Type::Pointer:
3148     // abi::__pointer_type_info.
3149     VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3150     break;
3151 
3152   case Type::MemberPointer:
3153     // abi::__pointer_to_member_type_info.
3154     VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3155     break;
3156   }
3157 
3158   llvm::Constant *VTable =
3159     CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3160   CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3161 
3162   llvm::Type *PtrDiffTy =
3163     CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3164 
3165   // The vtable address point is 2.
3166   llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3167   VTable =
3168       llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
3169   VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3170 
3171   Fields.push_back(VTable);
3172 }
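// A note on the '+2' above: the runtime vtables referenced here (for example
// _ZTVN10__cxxabiv117__class_type_infoE) follow the usual Itanium layout,
//
//   [0] offset-to-top, [1] RTTI pointer, [2...] virtual function pointers,
//
// so stepping two pointer-sized slots past the symbol yields the address point
// that a type_info object's vptr must reference.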
3173 
3174 /// Return the linkage that the type info and type info name constants
3175 /// should have for the given type.
3176 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3177                                                              QualType Ty) {
3178   // Itanium C++ ABI 2.9.5p7:
3179   //   In addition, it and all of the intermediate abi::__pointer_type_info
3180   //   structs in the chain down to the abi::__class_type_info for the
3181   //   incomplete class type must be prevented from resolving to the
3182   //   corresponding type_info structs for the complete class type, possibly
3183   //   by making them local static objects. Finally, a dummy class RTTI is
3184   //   generated for the incomplete type that will not resolve to the final
3185   //   complete class RTTI (because the latter need not exist), possibly by
3186   //   making it a local static object.
3187   if (ContainsIncompleteClassType(Ty))
3188     return llvm::GlobalValue::InternalLinkage;
3189 
3190   switch (Ty->getLinkage()) {
3191   case NoLinkage:
3192   case InternalLinkage:
3193   case UniqueExternalLinkage:
3194     return llvm::GlobalValue::InternalLinkage;
3195 
3196   case VisibleNoLinkage:
3197   case ModuleInternalLinkage:
3198   case ModuleLinkage:
3199   case ExternalLinkage:
3200     // If RTTI is not enabled, the type info struct is only going to be used
3201     // for exception handling.  Give it linkonce_odr linkage.
3202     if (!CGM.getLangOpts().RTTI)
3203       return llvm::GlobalValue::LinkOnceODRLinkage;
3204 
3205     if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3206       const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3207       if (RD->hasAttr<WeakAttr>())
3208         return llvm::GlobalValue::WeakODRLinkage;
3209       if (CGM.getTriple().isWindowsItaniumEnvironment())
3210         if (RD->hasAttr<DLLImportAttr>() &&
3211             ShouldUseExternalRTTIDescriptor(CGM, Ty))
3212           return llvm::GlobalValue::ExternalLinkage;
3213       // MinGW always uses LinkOnceODRLinkage for type info.
3214       if (RD->isDynamicClass() &&
3215           !CGM.getContext()
3216                .getTargetInfo()
3217                .getTriple()
3218                .isWindowsGNUEnvironment())
3219         return CGM.getVTableLinkage(RD);
3220     }
3221 
3222     return llvm::GlobalValue::LinkOnceODRLinkage;
3223   }
3224 
3225   llvm_unreachable("Invalid linkage!");
3226 }
3227 
3228 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3229   // We want to operate on the canonical type.
3230   Ty = Ty.getCanonicalType();
3231 
3232   // Check if we've already emitted an RTTI descriptor for this type.
3233   SmallString<256> Name;
3234   llvm::raw_svector_ostream Out(Name);
3235   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3236 
3237   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3238   if (OldGV && !OldGV->isDeclaration()) {
3239     assert(!OldGV->hasAvailableExternallyLinkage() &&
3240            "available_externally typeinfos not yet implemented");
3241 
3242     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3243   }
3244 
3245   // Check if there is already an external RTTI descriptor for this type.
3246   if (IsStandardLibraryRTTIDescriptor(Ty) ||
3247       ShouldUseExternalRTTIDescriptor(CGM, Ty))
3248     return GetAddrOfExternalRTTIDescriptor(Ty);
3249 
3250   // Compute the linkage to use for the type_info object and its name.
3251   llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3252 
3253   // Give the type_info object and name the formal visibility of the
3254   // type itself.
3255   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3256   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3257     // If the linkage is local, only default visibility makes sense.
3258     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3259   else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3260            ItaniumCXXABI::RUK_NonUniqueHidden)
3261     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3262   else
3263     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3264 
3265   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3266       llvm::GlobalValue::DefaultStorageClass;
3267   if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3268     auto RD = Ty->getAsCXXRecordDecl();
3269     if (RD && RD->hasAttr<DLLExportAttr>())
3270       DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3271   }
3272 
3273   return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3274 }
3275 
3276 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3277       QualType Ty,
3278       llvm::GlobalVariable::LinkageTypes Linkage,
3279       llvm::GlobalValue::VisibilityTypes Visibility,
3280       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3281   // Add the vtable pointer.
3282   BuildVTablePointer(cast<Type>(Ty));
3283 
3284   // And the name.
3285   llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3286   llvm::Constant *TypeNameField;
3287 
3288   // If we're supposed to demote the visibility, be sure to set a flag
3289   // to use a string comparison for type_info comparisons.
3290   ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3291       CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3292   if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3293     // The flag is the sign bit, which on ARM64 is defined to be clear
3294     // for global pointers.  This is very ARM64-specific.
3295     TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3296     llvm::Constant *flag =
3297         llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3298     TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3299     TypeNameField =
3300         llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3301   } else {
3302     TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3303   }
3304   Fields.push_back(TypeNameField);
3305 
3306   switch (Ty->getTypeClass()) {
3307 #define TYPE(Class, Base)
3308 #define ABSTRACT_TYPE(Class, Base)
3309 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3310 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3311 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3312 #include "clang/AST/TypeNodes.def"
3313     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3314 
3315   // GCC treats vector types as fundamental types.
3316   case Type::Builtin:
3317   case Type::Vector:
3318   case Type::ExtVector:
3319   case Type::Complex:
3320   case Type::BlockPointer:
3321     // Itanium C++ ABI 2.9.5p4:
3322     // abi::__fundamental_type_info adds no data members to std::type_info.
3323     break;
3324 
3325   case Type::LValueReference:
3326   case Type::RValueReference:
3327     llvm_unreachable("References shouldn't get here");
3328 
3329   case Type::Auto:
3330   case Type::DeducedTemplateSpecialization:
3331     llvm_unreachable("Undeduced type shouldn't get here");
3332 
3333   case Type::Pipe:
3334     llvm_unreachable("Pipe type shouldn't get here");
3335 
3336   case Type::ConstantArray:
3337   case Type::IncompleteArray:
3338   case Type::VariableArray:
3339     // Itanium C++ ABI 2.9.5p5:
3340     // abi::__array_type_info adds no data members to std::type_info.
3341     break;
3342 
3343   case Type::FunctionNoProto:
3344   case Type::FunctionProto:
3345     // Itanium C++ ABI 2.9.5p5:
3346     // abi::__function_type_info adds no data members to std::type_info.
3347     break;
3348 
3349   case Type::Enum:
3350     // Itanium C++ ABI 2.9.5p5:
3351     // abi::__enum_type_info adds no data members to std::type_info.
3352     break;
3353 
3354   case Type::Record: {
3355     const CXXRecordDecl *RD =
3356       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3357     if (!RD->hasDefinition() || !RD->getNumBases()) {
3358       // We don't need to emit any fields.
3359       break;
3360     }
3361 
3362     if (CanUseSingleInheritance(RD))
3363       BuildSIClassTypeInfo(RD);
3364     else
3365       BuildVMIClassTypeInfo(RD);
3366 
3367     break;
3368   }
3369 
3370   case Type::ObjCObject:
3371   case Type::ObjCInterface:
3372     BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3373     break;
3374 
3375   case Type::ObjCObjectPointer:
3376     BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3377     break;
3378 
3379   case Type::Pointer:
3380     BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3381     break;
3382 
3383   case Type::MemberPointer:
3384     BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3385     break;
3386 
3387   case Type::Atomic:
3388     // No fields, at least for the moment.
3389     break;
3390   }
3391 
3392   llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3393 
3394   SmallString<256> Name;
3395   llvm::raw_svector_ostream Out(Name);
3396   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3397   llvm::Module &M = CGM.getModule();
3398   llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3399   llvm::GlobalVariable *GV =
3400       new llvm::GlobalVariable(M, Init->getType(),
3401                                /*Constant=*/true, Linkage, Init, Name);
3402 
3403   // If there's already an old global variable, replace it with the new one.
3404   if (OldGV) {
3405     GV->takeName(OldGV);
3406     llvm::Constant *NewPtr =
3407       llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3408     OldGV->replaceAllUsesWith(NewPtr);
3409     OldGV->eraseFromParent();
3410   }
3411 
3412   if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3413     GV->setComdat(M.getOrInsertComdat(GV->getName()));
3414 
3415   CharUnits Align =
3416       CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3417   GV->setAlignment(Align.getQuantity());
3418 
3419   // The Itanium ABI specifies that type_info objects must be globally
3420   // unique, with one exception: if the type is an incomplete class
3421   // type or a (possibly indirect) pointer to one.  That exception
3422   // affects the general case of comparing type_info objects produced
3423   // by the typeid operator, which is why the comparison operators on
3424   // std::type_info generally use the type_info name pointers instead
3425   // of the object addresses.  However, the language's built-in uses
3426   // of RTTI generally require class types to be complete, even when
3427   // manipulating pointers to those class types.  This allows the
3428   // implementation of dynamic_cast to rely on address equality tests,
3429   // which is much faster.
3430 
3431   // All of this is to say that it's important that both the type_info
3432   // object and the type_info name be uniqued when weakly emitted.
3433 
3434   TypeName->setVisibility(Visibility);
3435   CGM.setDSOLocal(TypeName);
3436 
3437   GV->setVisibility(Visibility);
3438   CGM.setDSOLocal(GV);
3439 
3440   TypeName->setDLLStorageClass(DLLStorageClass);
3441   GV->setDLLStorageClass(DLLStorageClass);
3442 
3443   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3444 }
3445 
3446 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3447 /// for the given Objective-C object type.
3448 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3449   // Drop qualifiers.
3450   const Type *T = OT->getBaseType().getTypePtr();
3451   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3452 
3453   // The builtin types are abi::__class_type_infos and don't require
3454   // extra fields.
3455   if (isa<BuiltinType>(T)) return;
3456 
3457   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3458   ObjCInterfaceDecl *Super = Class->getSuperClass();
3459 
3460   // Root classes are also __class_type_info.
3461   if (!Super) return;
3462 
3463   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3464 
3465   // Everything else is single inheritance.
3466   llvm::Constant *BaseTypeInfo =
3467       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3468   Fields.push_back(BaseTypeInfo);
3469 }
3470 
3471 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3472 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3473 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3474   // Itanium C++ ABI 2.9.5p6b:
3475   // It adds to abi::__class_type_info a single member pointing to the
3476   // type_info structure for the base type,
3477   llvm::Constant *BaseTypeInfo =
3478     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3479   Fields.push_back(BaseTypeInfo);
3480 }
3481 
3482 namespace {
3483   /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3484   /// a class hierarchy.
3485   struct SeenBases {
3486     llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3487     llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3488   };
3489 }
3490 
3491 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3492 /// abi::__vmi_class_type_info.
3493 ///
3494 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3495                                              SeenBases &Bases) {
3496 
3497   unsigned Flags = 0;
3498 
3499   const CXXRecordDecl *BaseDecl =
3500     cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3501 
3502   if (Base->isVirtual()) {
3503     // Mark the virtual base as seen.
3504     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3505       // If this virtual base has been seen before, then the class is diamond
3506       // shaped.
3507       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3508     } else {
3509       if (Bases.NonVirtualBases.count(BaseDecl))
3510         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3511     }
3512   } else {
3513     // Mark the non-virtual base as seen.
3514     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3515       // If this non-virtual base has been seen before, then the class has non-
3516       // diamond shaped repeated inheritance.
3517       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3518     } else {
3519       if (Bases.VirtualBases.count(BaseDecl))
3520         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3521     }
3522   }
3523 
3524   // Walk all bases.
3525   for (const auto &I : BaseDecl->bases())
3526     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3527 
3528   return Flags;
3529 }
3530 
3531 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3532   unsigned Flags = 0;
3533   SeenBases Bases;
3534 
3535   // Walk all bases.
3536   for (const auto &I : RD->bases())
3537     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3538 
3539   return Flags;
3540 }
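// A worked example for the two flags, using hypothetical classes:
//
//   struct A { };
//   struct B : virtual A { };   struct C : virtual A { };
//   struct D : B, C { };        // virtual base A reached twice -> diamond
//
//   struct X { };
//   struct Y : X { };           struct Z : X { };
//   struct W : Y, Z { };        // non-virtual base X reached twice
//
// D's __vmi_class_type_info carries VMI_DiamondShaped; W's carries
// VMI_NonDiamondRepeat.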
3541 
3542 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3543 /// classes with bases that do not satisfy the abi::__si_class_type_info
3544 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3545 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3546   llvm::Type *UnsignedIntLTy =
3547     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3548 
3549   // Itanium C++ ABI 2.9.5p6c:
3550   //   __flags is a word with flags describing details about the class
3551   //   structure, which may be referenced by using the __flags_masks
3552   //   enumeration. These flags refer to both direct and indirect bases.
3553   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3554   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3555 
3556   // Itanium C++ ABI 2.9.5p6c:
3557   //   __base_count is a word with the number of direct proper base class
3558   //   descriptions that follow.
3559   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3560 
3561   if (!RD->getNumBases())
3562     return;
3563 
3564   // Now add the base class descriptions.
3565 
3566   // Itanium C++ ABI 2.9.5p6c:
3567   //   __base_info[] is an array of base class descriptions -- one for every
3568   //   direct proper base. Each description is of the type:
3569   //
3570   //   struct abi::__base_class_type_info {
3571   //   public:
3572   //     const __class_type_info *__base_type;
3573   //     long __offset_flags;
3574   //
3575   //     enum __offset_flags_masks {
3576   //       __virtual_mask = 0x1,
3577   //       __public_mask = 0x2,
3578   //       __offset_shift = 8
3579   //     };
3580   //   };
3581 
3582   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3583   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3584   // LLP64 platforms.
3585   // FIXME: Consider updating libc++abi to match, and extend this logic to all
3586   // LLP64 platforms.
3587   QualType OffsetFlagsTy = CGM.getContext().LongTy;
3588   const TargetInfo &TI = CGM.getContext().getTargetInfo();
3589   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3590     OffsetFlagsTy = CGM.getContext().LongLongTy;
3591   llvm::Type *OffsetFlagsLTy =
3592       CGM.getTypes().ConvertType(OffsetFlagsTy);
3593 
3594   for (const auto &Base : RD->bases()) {
3595     // The __base_type member points to the RTTI for the base type.
3596     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3597 
3598     const CXXRecordDecl *BaseDecl =
3599       cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3600 
3601     int64_t OffsetFlags = 0;
3602 
3603     // All but the lower 8 bits of __offset_flags are a signed offset.
3604     // For a non-virtual base, this is the offset in the object of the base
3605     // subobject. For a virtual base, this is the offset in the virtual table of
3606     // the virtual base offset for the virtual base referenced (negative).
3607     CharUnits Offset;
3608     if (Base.isVirtual())
3609       Offset =
3610         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3611     else {
3612       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3613       Offset = Layout.getBaseClassOffset(BaseDecl);
3614     }
3615 
3616     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3617 
3618     // The low-order byte of __offset_flags contains flags, as given by the
3619     // masks from the enumeration __offset_flags_masks.
3620     if (Base.isVirtual())
3621       OffsetFlags |= BCTI_Virtual;
3622     if (Base.getAccessSpecifier() == AS_public)
3623       OffsetFlags |= BCTI_Public;
3624 
3625     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3626   }
3627 }
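// A quick worked example of the encoding above: a public, non-virtual base at
// byte offset 16 of the derived object yields
// __offset_flags == (16 << 8) | __public_mask == 0x1002, while a public virtual
// base stores the (negative) virtual-base-offset offset from the vtable in the
// high bits and additionally sets __virtual_mask.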
3628 
3629 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3630 /// pieces from \p Type.
3631 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3632   unsigned Flags = 0;
3633 
3634   if (Type.isConstQualified())
3635     Flags |= ItaniumRTTIBuilder::PTI_Const;
3636   if (Type.isVolatileQualified())
3637     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3638   if (Type.isRestrictQualified())
3639     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3640   Type = Type.getUnqualifiedType();
3641 
3642   // Itanium C++ ABI 2.9.5p7:
3643   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
3644   //   incomplete class type, the incomplete target type flag is set.
3645   if (ContainsIncompleteClassType(Type))
3646     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3647 
3648   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3649     if (Proto->isNothrow()) {
3650       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3651       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
3652     }
3653   }
3654 
3655   return Flags;
3656 }
3657 
3658 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3659 /// used for pointer types.
3660 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3661   // Itanium C++ ABI 2.9.5p7:
3662   //   __flags is a flag word describing the cv-qualification and other
3663   //   attributes of the type pointed to
3664   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3665 
3666   llvm::Type *UnsignedIntLTy =
3667     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3668   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3669 
3670   // Itanium C++ ABI 2.9.5p7:
3671   //  __pointee is a pointer to the std::type_info derivation for the
3672   //  unqualified type being pointed to.
3673   llvm::Constant *PointeeTypeInfo =
3674       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3675   Fields.push_back(PointeeTypeInfo);
3676 }
3677 
3678 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3679 /// struct, used for member pointer types.
3680 void
3681 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3682   QualType PointeeTy = Ty->getPointeeType();
3683 
3684   // Itanium C++ ABI 2.9.5p7:
3685   //   __flags is a flag word describing the cv-qualification and other
3686   //   attributes of the type pointed to.
3687   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3688 
3689   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3690   if (IsIncompleteClassType(ClassType))
3691     Flags |= PTI_ContainingClassIncomplete;
3692 
3693   llvm::Type *UnsignedIntLTy =
3694     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3695   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3696 
3697   // Itanium C++ ABI 2.9.5p7:
3698   //   __pointee is a pointer to the std::type_info derivation for the
3699   //   unqualified type being pointed to.
3700   llvm::Constant *PointeeTypeInfo =
3701       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3702   Fields.push_back(PointeeTypeInfo);
3703 
3704   // Itanium C++ ABI 2.9.5p9:
3705   //   __context is a pointer to an abi::__class_type_info corresponding to the
3706   //   class type containing the member pointed to
3707   //   (e.g., the "A" in "int A::*").
3708   Fields.push_back(
3709       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
3710 }
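
// Illustrative example (hypothetical class, for exposition only): for the
// member pointer type 'int A::*', where A is a complete class, the
// __pointer_to_member_type_info built above would contain
//   __flags   = 0 (no cv-qualifiers, containing class complete),
//   __pointee = the type_info for 'int', and
//   __context = the __class_type_info for 'A'.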

llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
  return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
}

void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char8Ty,            getContext().Char16Ty,
      getContext().Char32Ty
  };
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      RD->hasAttr<DLLExportAttr>()
      ? llvm::GlobalValue::DLLExportStorageClass
      : llvm::GlobalValue::DefaultStorageClass;
  llvm::GlobalValue::VisibilityTypes Visibility =
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
  for (const QualType &FundamentalType : FundamentalTypes) {
    QualType PointerType = getContext().getPointerType(FundamentalType);
    QualType PointerTypeConst = getContext().getPointerType(
        FundamentalType.withConst());
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage,
          Visibility, DLLStorageClass);
  }
}
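
// Illustrative note: for each fundamental type this emits the type_info for
// the type itself plus its pointer and pointer-to-const variants; e.g. for
// 'int' the emitted globals would be (in Itanium mangling) _ZTIi, _ZTIPi and
// _ZTIPKi.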

/// What sort of uniqueness rules should we use for the RTTI for the
/// given type?
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
  if (shouldRTTIBeUnique())
    return RUK_Unique;

  // It's only necessary for linkonce_odr or weak_odr linkage.
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
      Linkage != llvm::GlobalValue::WeakODRLinkage)
    return RUK_Unique;

  // It's only necessary with default visibility.
  if (CanTy->getVisibility() != DefaultVisibility)
    return RUK_Unique;

  // If we're not required to publish this symbol, hide it.
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    return RUK_NonUniqueHidden;

  // If we're required to publish this symbol, as we might be under an
  // explicit instantiation, leave it with default visibility but
  // enable string-comparisons.
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
  return RUK_NonUniqueVisible;
}
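
// Illustrative note: for ABIs where shouldRTTIBeUnique() is false, a
// type_info emitted with linkonce_odr linkage (say, for an implicitly
// instantiated class template specialization) would be classified
// RUK_NonUniqueHidden, while one emitted with weak_odr linkage (e.g. from an
// explicit instantiation definition) would be RUK_NonUniqueVisible.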

// Find out how to codegen the complete destructor and constructor
namespace {
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
                                       const CXXMethodDecl *MD) {
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
    return StructorCodegen::Emit;

  // The complete and base structors are not equivalent if there are any virtual
  // bases, so emit separate functions.
  if (MD->getParent()->getNumVBases())
    return StructorCodegen::Emit;

  GlobalDecl AliasDecl;
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
  } else {
    const auto *CD = cast<CXXConstructorDecl>(MD);
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
  }
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    return StructorCodegen::RAUW;

  // FIXME: Should we allow available_externally aliases?
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
    return StructorCodegen::RAUW;

  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
        CGM.getTarget().getTriple().isOSBinFormatWasm())
      return StructorCodegen::COMDAT;
    return StructorCodegen::Emit;
  }

  return StructorCodegen::Alias;
}
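
// Illustrative note: with ctor/dtor aliases enabled (-mconstructor-aliases),
// a class with no virtual bases has a complete-object constructor (C1) that
// is identical to its base-object constructor (C2), so the choice above is
// how to avoid emitting the same body twice (alias, comdat pair, or RAUW);
// any virtual base forces separate emission.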

static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}
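
// Illustrative sketch (hypothetical class 'A' with a user-provided default
// constructor): the alias emitted here would look roughly like
//
//   @_ZN1AC1Ev = unnamed_addr alias void (%struct.A*),
//                void (%struct.A*)* @_ZN1AC2Ev
//
// i.e. the complete-object constructor C1 becomes just another name for the
// base-object constructor C2.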

void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
                                    StructorType Type) {
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  if (Type == StructorType::Complete) {
    GlobalDecl CompleteDecl;
    GlobalDecl BaseDecl;
    if (CD) {
      CompleteDecl = GlobalDecl(CD, Ctor_Complete);
      BaseDecl = GlobalDecl(CD, Ctor_Base);
    } else {
      CompleteDecl = GlobalDecl(DD, Dtor_Complete);
      BaseDecl = GlobalDecl(DD, Dtor_Base);
    }

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      StringRef MangledName = CGM.getMangledName(CompleteDecl);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);

  if (CGType == StructorCodegen::COMDAT) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}
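
// Illustrative note (hypothetical class 'A'): in the COMDAT case the
// base-object destructor body is emitted into a comdat named after the
// "unified" D5 mangling (e.g. $_ZN1AD5Ev), while the complete-object
// destructor _ZN1AD1Ev is emitted as an alias of the base-object one
// _ZN1AD2Ev, so the linker can deduplicate the whole group across TUs.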

static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
  // void *__cxa_begin_catch(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}

static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
  // void __cxa_end_catch();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}

static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
  // void *__cxa_get_exception_ptr(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}

namespace {
  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
  /// exception type lets us state definitively that the thrown exception
  /// type does not have a destructor.  In particular:
  ///   - Catch-alls tell us nothing, so we have to conservatively
  ///     assume that the thrown exception might have a destructor.
  ///   - Catches by reference behave according to their base types.
  ///   - Catches of non-record types will only trigger for exceptions
  ///     of non-record types, which never have destructors.
  ///   - Catches of record types can trigger for arbitrary subclasses
  ///     of the caught type, so we have to assume the actual thrown
  ///     exception type might have a throwing destructor, even if the
  ///     caught type's destructor is trivial or nothrow.
  struct CallEndCatch final : EHScopeStack::Cleanup {
    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
    bool MightThrow;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (!MightThrow) {
        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
        return;
      }

      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
    }
  };
}

/// Emits a call to __cxa_begin_catch and enters a cleanup to call
/// __cxa_end_catch.
///
/// \param EndMightThrow - true if __cxa_end_catch might throw
static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
                                   llvm::Value *Exn,
                                   bool EndMightThrow) {
  llvm::CallInst *call =
    CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);

  CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);

  return call;
}

/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type, that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy =
          cast<llvm::PointerType>(LLVMCatchTy)->getElementType();

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        LLVM_FALLTHROUGH;

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
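
// Illustrative summary (hypothetical handlers): for 'catch (int i)' the value
// is loaded directly out of the exception object returned by
// __cxa_begin_catch; for 'catch (const E &e)' the reference is bound to the
// in-flight exception with no copy; and for 'catch (E e)' with a non-trivial
// copy constructor, __cxa_get_exception_ptr supplies the adjusted pointer so
// the copy can be constructed before __cxa_begin_catch is entered.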

/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                   const CXXCatchStmt *S) {
  // We have to be very careful with the ordering of cleanups here:
  //   C++ [except.throw]p4:
  //     The destruction [of the exception temporary] occurs
  //     immediately after the destruction of the object declared in
  //     the exception-declaration in the handler.
  //
  // So the precise ordering is:
  //   1.  Construct catch variable.
  //   2.  __cxa_begin_catch
  //   3.  Enter __cxa_end_catch cleanup
  //   4.  Enter dtor cleanup
  //
  // We do this by using a slightly abnormal initialization process.
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //     - EmitAutoVarAlloca creates the variable and debug info
  //       - InitCatchParam initializes the variable from the exception
  //       - CallBeginCatch calls __cxa_begin_catch
  //       - CallBeginCatch enters the __cxa_end_catch cleanup
  //     - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt closes the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  if (!CatchParam) {
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
  CGF.EmitAutoVarCleanups(var);
}

/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
  llvm::FunctionType *fnTy =
    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
  llvm::Constant *fnRef = CGM.CreateRuntimeFunction(
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);

  llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
  if (fn && fn->empty()) {
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely.  The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
      llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }

  return fnRef;
}
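
// Illustrative sketch of the IR this helper expands to (attribute placement
// simplified):
//
//   define linkonce_odr hidden void @__clang_call_terminate(i8* %exn)
//       noreturn nounwind comdat {
//     %1 = call i8* @__cxa_begin_catch(i8* %exn)
//     call void @_ZSt9terminatev()
//     unreachable
//   }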

llvm::CallInst *
ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                   llvm::Value *Exn) {
  // In C++, we want to call __cxa_begin_catch() before terminating.
  if (Exn) {
    assert(CGF.CGM.getLangOpts().CPlusPlus);
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
  }
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}

std::pair<llvm::Value *, const CXXRecordDecl *>
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
                             const CXXRecordDecl *RD) {
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}

void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                       const CXXCatchStmt *C) {
  if (CGF.getTarget().hasFeature("exception-handling"))
    CGF.EHStack.pushCleanup<CatchRetScope>(
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
  ItaniumCXXABI::emitBeginCatch(CGF, C);
}