//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  http://www.codesourcery.com/public/cxx-abi/abi.html
//  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/Type.h"
#include "clang/AST/StmtCXX.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
  /// VTables - All the vtables which have been defined.
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;

protected:
  bool UseARMMethodPtrABI;
  bool UseARMGuardVarABI;
  bool Use32BitVTableOffsetABI;

  ItaniumMangleContext &getMangleContext() {
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
  }

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
                bool UseARMMethodPtrABI = false,
                bool UseARMGuardVarABI = false) :
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
    UseARMGuardVarABI(UseARMGuardVarABI),
    Use32BitVTableOffsetABI(false) { }

  bool classifyReturnType(CGFunctionInfo &FI) const override;

  bool passClassIndirect(const CXXRecordDecl *RD) const {
    return !canCopyArgument(RD);
  }

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
    // If C++ prohibits us from making a copy, pass by address.
    if (passClassIndirect(RD))
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isThisCompleteObject(GlobalDecl GD) const override {
    // The Itanium ABI has separate complete-object vs.  base-object
    // variants of both constructors and destructors.
    if (isa<CXXDestructorDecl>(GD.getDecl())) {
      switch (GD.getDtorType()) {
      case Dtor_Complete:
      case Dtor_Deleting:
        return true;

      case Dtor_Base:
        return false;

      case Dtor_Comdat:
        llvm_unreachable("emitting dtor comdat as function?");
      }
      llvm_unreachable("bad dtor kind");
    }
    if (isa<CXXConstructorDecl>(GD.getDecl())) {
      switch (GD.getCtorType()) {
      case Ctor_Complete:
        return true;

      case Ctor_Base:
        return false;

      case Ctor_CopyingClosure:
      case Ctor_DefaultClosure:
        llvm_unreachable("closure ctors in Itanium ABI?");

      case Ctor_Comdat:
        llvm_unreachable("emitting ctor comdat as function?");
      }
      llvm_unreachable("bad ctor kind");
    }

    // No other kinds.
    return false;
  }

  bool isZeroInitializable(const MemberPointerType *MPT) override;

  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;

  CGCallee
    EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                    const Expr *E,
                                    Address This,
                                    llvm::Value *&ThisPtrForCall,
                                    llvm::Value *MemFnPtr,
                                    const MemberPointerType *MPT) override;

  llvm::Value *
    EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
                                 Address Base,
                                 llvm::Value *MemPtr,
                                 const MemberPointerType *MPT) override;

  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *Src) override;
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                              llvm::Constant *Src) override;

  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;

  llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset) override;
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                     CharUnits ThisAdjustment);

  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L, llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) override;

  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                         llvm::Value *Addr,
                                         const MemberPointerType *MPT) override;

  void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
                               Address Ptr, QualType ElementType,
                               const CXXDestructorDecl *Dtor) override;

  /// Itanium says that an _Unwind_Exception has to be "double-word"
  /// aligned (and thus the end of it is also so-aligned), meaning 16
  /// bytes.  Of course, that was written for the actual Itanium,
  /// which is a 64-bit platform.  Classically, the ABI doesn't really
  /// specify the alignment on other platforms, but in practice
  /// libUnwind declares the struct with __attribute__((aligned)), so
  /// we assume that alignment here.  (It's generally 16 bytes, but
  /// some targets overwrite it.)
  CharUnits getAlignmentOfExnObject() {
    auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
    return CGM.getContext().toCharUnitsFromBits(align);
  }

  void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
  void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;

  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

  void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
  llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
  CatchTypeInfo
  getAddrOfCXXCatchHandlerType(QualType Ty,
                               QualType CatchHandlerType) override {
    return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
  }

  bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
  void EmitBadTypeidCall(CodeGenFunction &CGF) override;
  llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
                          Address ThisPtr,
                          llvm::Type *StdTypeInfoPtrTy) override;

  bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                          QualType SrcRecordTy) override;

  llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
                                   QualType SrcRecordTy, QualType DestTy,
                                   QualType DestRecordTy,
                                   llvm::BasicBlock *CastEnd) override;

  llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
                                     QualType SrcRecordTy,
                                     QualType DestTy) override;

  bool EmitBadCastCall(CodeGenFunction &CGF) override;

  llvm::Value *
    GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
                              const CXXRecordDecl *ClassDecl,
                              const CXXRecordDecl *BaseClassDecl) override;

  void EmitCXXConstructors(const CXXConstructorDecl *D) override;

  AddedStructorArgs
  buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
                         SmallVectorImpl<CanQualType> &ArgTys) override;

  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
                              CXXDtorType DT) const override {
    // Itanium does not emit any destructor variant as an inline thunk.
    // Delegating may occur as an optimization, but all variants are either
    // emitted with external linkage or as linkonce if they are inline and used.
    return false;
  }

  void EmitCXXDestructors(const CXXDestructorDecl *D) override;

  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
                                 FunctionArgList &Params) override;

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;

  AddedStructorArgs
  addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
                             CXXCtorType Type, bool ForVirtualBase,
                             bool Delegating, CallArgList &Args) override;

  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
                          CXXDtorType Type, bool ForVirtualBase,
                          bool Delegating, Address This) override;

  void emitVTableDefinitions(CodeGenVTables &CGVT,
                             const CXXRecordDecl *RD) override;

  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
                                           CodeGenFunction::VPtr Vptr) override;

  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
    return true;
  }

  llvm::Constant *
  getVTableAddressPoint(BaseSubobject Base,
                        const CXXRecordDecl *VTableClass) override;

  llvm::Value *getVTableAddressPointInStructor(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;

  llvm::Value *getVTableAddressPointInStructorWithVTT(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);

  llvm::Constant *
  getVTableAddressPointForConstExpr(BaseSubobject Base,
                                    const CXXRecordDecl *VTableClass) override;

  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
                                        CharUnits VPtrOffset) override;

  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
                                     Address This, llvm::Type *Ty,
                                     SourceLocation Loc) override;

  llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                         const CXXDestructorDecl *Dtor,
                                         CXXDtorType DtorType,
                                         Address This,
                                         const CXXMemberCallExpr *CE) override;

  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;

  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;

  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
                       bool ReturnAdjustment) override {
    // Allow inlining of thunks by emitting them with available_externally
    // linkage together with vtables when needed.
    if (ForVTable && !Thunk->hasLocalLinkage())
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
    CGM.setGVProperties(Thunk, GD);
  }

  bool exportThunk() override { return true; }

  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const ThisAdjustment &TA) override;

  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) override;

  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
                              FunctionArgList &Args) const override {
    assert(!Args.empty() && "expected the arglist to not be empty!");
    return Args.size() - 1;
  }

  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() override
    { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   Address allocPtr,
                                   CharUnits cookieSize) override;

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr,
                       bool PerformInit) override;
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::Constant *dtor, llvm::Constant *addr) override;

  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                llvm::Value *Val);
  void EmitThreadLocalInitFuncs(
      CodeGenModule &CGM,
      ArrayRef<const VarDecl *> CXXThreadLocals,
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;

  bool usesThreadWrapperFunction() const override { return true; }
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
                                      QualType LValType) override;

  bool NeedsVTTParameter(GlobalDecl GD) override;

  /**************************** RTTI Uniqueness ******************************/

protected:
  /// Returns true if the ABI requires RTTI type_info objects to be unique
  /// across a program.
  virtual bool shouldRTTIBeUnique() const { return true; }

public:
  /// What sort of unique-RTTI behavior should we use?
  enum RTTIUniquenessKind {
    /// We are guaranteeing, or need to guarantee, that the RTTI string
    /// is unique.
    RUK_Unique,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// can demote to hidden visibility but must use string comparisons.
    RUK_NonUniqueHidden,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// have to use string comparisons, but we also have to emit it with
    /// non-hidden visibility.
    RUK_NonUniqueVisible
  };

  /// Return the required visibility status for the given type and linkage in
  /// the current ABI.
  RTTIUniquenessKind
  classifyRTTIUniqueness(QualType CanTy,
                         llvm::GlobalValue::LinkageTypes Linkage) const;
  friend class ItaniumRTTIBuilder;

  void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;

  std::pair<llvm::Value *, const CXXRecordDecl *>
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
                const CXXRecordDecl *RD) override;

 private:
   bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
     const auto &VtableLayout =
         CGM.getItaniumVTableContext().getVTableLayout(RD);

     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
       // Skip empty slot.
       if (!VtableComponent.isUsedFunctionPointerKind())
         continue;

       const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
       if (!Method->getCanonicalDecl()->isInlined())
         continue;

       StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
       auto *Entry = CGM.GetGlobalValue(Name);
       // This checks whether the virtual inline function has already been
       // emitted. Note that it is possible for this inline function to be
       // emitted after we try to emit the vtable speculatively; because of
       // this, we make an extra pass after emitting all deferred vtables to
       // find and emit these vtables opportunistically.
       if (!Entry || Entry->isDeclaration())
         return true;
     }
     return false;
  }

  bool isVTableHidden(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
            CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      if (VtableComponent.isRTTIKind()) {
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
          return true;
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
            !Method->isDefined())
          return true;
      }
    }
    return false;
  }
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
                  /* UseARMGuardVarABI = */ true) {}

  bool HasThisReturn(GlobalDecl GD) const override {
    return (isa<CXXConstructorDecl>(GD.getDecl()) || (
              isa<CXXDestructorDecl>(GD.getDecl()) &&
              GD.getDtorType() != Dtor_Deleting));
  }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};

class iOS64CXXABI : public ARMCXXABI {
public:
  iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};

class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

private:
  bool HasThisReturn(GlobalDecl GD) const override {
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
            GD.getDtorType() != Dtor_Deleting);
  }
  bool canCallMismatchedFunctionType() const override { return false; }
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getTarget().getCXXABI().getKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
    return new ARMCXXABI(CGM);

  case TargetCXXABI::iOS64:
    return new iOS64CXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return values
  // and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
                             /* UseARMGuardVarABI = */ true);

  case TargetCXXABI::GenericMIPS:
    return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);

  case TargetCXXABI::WebAssembly:
    return new WebAssemblyCXXABI(CGM);

  case TargetCXXABI::GenericItanium:
    if (CGM.getContext().getTargetInfo().getTriple().getArch()
        == llvm::Triple::le32) {
      // For PNaCl, use ARM-style method pointers so that PNaCl code
      // does not assume anything about the alignment of function
      // pointers.
      return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
                               /* UseARMGuardVarABI = */ false);
    }
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
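///
/// For illustration only (an editor's sketch, not text from the ABI
/// documents): given
///   struct A { void f(); virtual void g(); };  // assume g is in vtable slot 0
/// a pointer to A::f is { (ptrdiff_t)&A::f, 0 } under both variants, while a
/// pointer to A::g is { 0 + 1, 0 } under Itanium and { 0, 2*0 + 1 } under ARM;
/// see BuildMemberPointer below for how these constants are produced.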
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
    llvm::Value *&ThisPtrForCall,
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));

  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);

  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");

  // Extract memptr.adj, which is in the second field.
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");

  // Compute the true adjustment.
  llvm::Value *Adj = RawAdj;
  if (UseARMMethodPtrABI)
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");

  // Apply the adjustment and cast back to the original struct type
  // for consistency.
  llvm::Value *This = ThisAddr.getPointer();
  llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
  Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
  This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
  ThisPtrForCall = This;

  // Load the function pointer.
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");

  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
  llvm::Value *IsVirtual;
  if (UseARMMethodPtrABI)
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
  else
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);

  // In the virtual path, the adjustment left 'This' pointing to the
  // vtable of the correct base subobject.  The "function pointer" is an
  // offset within the vtable (+1 for the virtual flag on non-ARM).
  CGF.EmitBlock(FnVirtual);

  // Cast the adjusted this to a pointer to vtable pointer and load.
  llvm::Type *VTableTy = Builder.getInt8PtrTy();
  CharUnits VTablePtrAlign =
    CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
                                      CGF.getPointerAlign());
  llvm::Value *VTable =
    CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);

  // Apply the offset.
  // On ARM64, to reserve extra space in virtual member function pointers,
  // we only pay attention to the low 32 bits of the offset.
  llvm::Value *VTableOffset = FnAsInt;
  if (!UseARMMethodPtrABI)
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
  if (Use32BitVTableOffsetABI) {
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
  }
  // Compute the address of the virtual function pointer.
  llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);

  // Check the address of the function pointer if CFI on member function
  // pointers is enabled.
  llvm::Constant *CheckSourceLocation;
  llvm::Constant *CheckTypeDesc;
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
                            CGM.HasHiddenLTOVisibility(RD);
  if (ShouldEmitCFICheck) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);

    CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getLocStart());
    CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
        CheckSourceLocation,
        CheckTypeDesc,
    };

    llvm::Metadata *MD =
        CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
    llvm::Value *TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);

    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {VFPAddr, TypeId});

    if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
      CGF.EmitTrapCheck(TypeTest);
    } else {
      llvm::Value *AllVtables = llvm::MetadataAsValue::get(
          CGM.getLLVMContext(),
          llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
      llvm::Value *ValidVtable = Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
      CGF.EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIMFCall),
                    SanitizerHandler::CFICheckFail, StaticData,
                    {VTable, ValidVtable});
    }

    FnVirtual = Builder.GetInsertBlock();
  }

  // Load the virtual function to call.
  VFPAddr = Builder.CreateBitCast(VFPAddr, FTy->getPointerTo()->getPointerTo());
  llvm::Value *VirtualFn = Builder.CreateAlignedLoad(
      VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
  CGF.EmitBranch(FnEnd);

  // In the non-virtual path, memptr.ptr is already the address of the
  // function to call; just reinterpret it as a function pointer.
  CGF.EmitBlock(FnNonVirtual);
  llvm::Value *NonVirtualFn =
    Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");

  // Check the function pointer if CFI on member function pointers is enabled.
  if (ShouldEmitCFICheck) {
    CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
    if (RD->hasDefinition()) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      llvm::Value *Bit = Builder.getFalse();
      llvm::Value *CastedNonVirtualFn =
          Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
            getContext().getMemberPointerType(
                MPT->getPointeeType(),
                getContext().getRecordType(Base).getTypePtr()));
        llvm::Value *TypeId =
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);

        llvm::Value *TypeTest =
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                               {CastedNonVirtualFn, TypeId});
        Bit = Builder.CreateOr(Bit, TypeTest);
      }

      CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
                    SanitizerHandler::CFICheckFail, StaticData,
                    {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});

      FnNonVirtual = Builder.GetInsertBlock();
    }
  }

  // We're done.
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);

  CGCallee Callee(FPT, CalleePtr);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Cast to char*.
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);

  // Apply the offset, which we assume is non-null.
  llvm::Value *Addr =
    Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
                            ->getPointerTo(Base.getAddressSpace());
  return Builder.CreateBitCast(Addr, PType);
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased;  and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
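///
/// A concrete sketch (editor's illustration, not ABI text): if a data member
/// sits 8 bytes into Base, and the Base subobject starts 16 bytes into
/// Derived, the Base member pointer holds offset 8 and the corresponding
/// Derived member pointer holds 16 + 8 = 24; converting back toward Base
/// subtracts the 16 again.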
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);

    const ASTContext &Context = getContext();
    CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    uint64_t VTableOffset = (Index * PointerWidth.getQuantity());

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.
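  //
  // For intuition (editor's note, not from the ABI text): converting a null
  // member function pointer across a class hierarchy still adds the base
  // offset to 'adj' (see EmitMemberPointerConversion above), so two null
  // values can disagree in 'adj'; the L.ptr == 0 clause is what keeps them
  // comparing equal anyway.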

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (passClassIndirect(RD)) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
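///
/// (Illustrative note from the editor, not ABI text: in
///    struct S { int first; };
/// the data member pointer &S::first is the offset 0, which is why 0 cannot
/// double as the null value and the ABI uses -1 instead.)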
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset =
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}

void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);

  llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  CharUnits ExnAlign = getAlignmentOfExnObject();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}

static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
computeOffsetHint(ASTContext & Context,const CXXRecordDecl * Src,const CXXRecordDecl * Dst)1253 static CharUnits computeOffsetHint(ASTContext &Context,
1254                                    const CXXRecordDecl *Src,
1255                                    const CXXRecordDecl *Dst) {
1256   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1257                      /*DetectVirtual=*/false);
1258 
1259   // If Dst is not derived from Src we can skip the whole computation below and
1260   // return that Src is not a public base of Dst.  Record all inheritance paths.
1261   if (!Dst->isDerivedFrom(Src, Paths))
1262     return CharUnits::fromQuantity(-2ULL);
1263 
1264   unsigned NumPublicPaths = 0;
1265   CharUnits Offset;
1266 
1267   // Now walk all possible inheritance paths.
1268   for (const CXXBasePath &Path : Paths) {
1269     if (Path.Access != AS_public)  // Ignore non-public inheritance.
1270       continue;
1271 
1272     ++NumPublicPaths;
1273 
1274     for (const CXXBasePathElement &PathElement : Path) {
1275       // If the path contains a virtual base class we can't give any hint.
1276       // -1: no hint.
1277       if (PathElement.Base->isVirtual())
1278         return CharUnits::fromQuantity(-1ULL);
1279 
1280       if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1281         continue;
1282 
1283       // Accumulate the base class offsets.
1284       const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1285       Offset += L.getBaseClassOffset(
1286           PathElement.Base->getType()->getAsCXXRecordDecl());
1287     }
1288   }
1289 
1290   // -2: Src is not a public base of Dst.
1291   if (NumPublicPaths == 0)
1292     return CharUnits::fromQuantity(-2ULL);
1293 
1294   // -3: Src is a multiple public base type but never a virtual base type.
1295   if (NumPublicPaths > 1)
1296     return CharUnits::fromQuantity(-3ULL);
1297 
1298   // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1299   // Return the offset of Src from the origin of Dst.
1300   return Offset;
1301 }
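
// A few illustrative cases (hypothetical class layouts, not taken from this
// file), for downcasts from A* with a polymorphic A:
//   struct A { virtual ~A(); };
//   struct B : A {};                 // A -> B: unique public base, hint 0
//   struct C : B, virtual A {};      // a path through a virtual base: hint -1
//   struct D : A, B {};              // A reachable on two public paths: hint -3
//   struct P { virtual ~P(); }; struct Q { virtual ~Q(); };
//   struct R : P, Q {};              // Q -> R: hint is Q's offset within R
//                                    // (e.g. 8 where pointers are 8 bytes)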

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  return IsDeref;
}

void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}

bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}

llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
  // Get the vtable pointer.
  llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
      ClassDecl);

  // Get the offset-to-top from the vtable.
  llvm::Value *OffsetToTop =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
  OffsetToTop =
    CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
                                  "offset.to.top");

  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

  return CGF.Builder.CreateBitCast(Value, DestLTy);
}

bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}

llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);

  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                   "vbase.offset.ptr");
  VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                             CGM.PtrDiffTy->getPointerTo());

  llvm::Value *VBaseOffset =
    CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
                                  "vbase.offset");

  return VBaseOffset;
}

void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}

CGCXXABI::AddedStructorArgs
ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type void **).
  if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0) {
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(Context.VoidPtrTy));
    return AddedStructorArgs::prefix(1);
  }
  return AddedStructorArgs{};
}
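
// For illustration (assumed declarations, not from this file): given
//   struct A { virtual ~A(); };
//   struct B : virtual A { B(); };
// the base-object constructor variant of B effectively has the signature
//   B::B(B *this, void **vtt)
// because B has a virtual base, while the complete-object variant keeps the
// declared signature and constructs the virtual A itself.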

void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // calls the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}

void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}

void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}

CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating, CallArgList &Args) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
  Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
  return AddedStructorArgs::prefix(1);  // Added one arg.
}

void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee =
      CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
                          DD);

  CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
                                  This.getPointer(), VTT, VTTTy,
                                  nullptr, nullptr);
}

void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder Builder(CGM);
  auto Components = Builder.beginStruct();
  CGVT.createVTableInitializer(Components, VTLayout, RTTI);
  Components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  unsigned PAlign = CGM.getTarget().getPointerAlign(0);
  VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  if (!VTable->isDeclarationForLinker())
    CGM.EmitVTableTypeMetadata(VTable, VTLayout);
}

bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
  if (Vptr.NearestVBase == nullptr)
    return false;
  return NeedsVTTParameter(CGF.CurGD);
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}

llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
}

llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
}

llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage);
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  CGM.setGVProperties(VTable, RD);

  return VTable;
}

CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  Ty = Ty->getPointerTo()->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
    auto *VFuncLoad =
        CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());

    // Add !invariant.load metadata to the virtual function load to indicate
    // that the function pointer in the vtable never changes. It is safe to
    // add this even without -fstrict-vtable-pointers, but it only helps
    // devirtualization when two loads of the same virtual function come from
    // the same vtable load, which does not happen unless devirtualization is
    // enabled with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers)
      VFuncLoad->setMetadata(
          llvm::LLVMContext::MD_invariant_load,
          llvm::MDNode::get(CGM.getLLVMContext(),
                            llvm::ArrayRef<llvm::Metadata *>()));
    VFunc = VFuncLoad;
  }

  CGCallee Callee(MethodDecl->getCanonicalDecl(), VFunc);
  return Callee;
}

llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, const CXXMemberCallExpr *CE) {
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
      Dtor, getFromDtorType(DtorType));
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee =
      CGCallee::forVirtual(CE, GlobalDecl(Dtor, DtorType), This, Ty);

  CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
                                  This.getPointer(), /*ImplicitParam=*/nullptr,
                                  QualType(), CE, nullptr);
  return nullptr;
}

void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
  CodeGenVTables &VTables = CGM.getVTables();
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}

bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an available_externally
  // copy of vtable.
  if (isVTableHidden(RD))
    return false;

  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // If there are no inline virtual functions left unemitted, it is safe to
  // emit an available_externally copy of the vtable.
  // FIXME: we could still emit a copy of the vtable if we can emit
  // definitions of the inline functions.
  return !hasAnyUnusedVirtualInlineFunction(RD);
}

static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    llvm::Type *PtrDiffTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *OffsetPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);

    OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

    // Load the adjustment offset from the vtable.
    llvm::Value *Offset =
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}

llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}

llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T = CGF.ReturnValue.getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonClassMemberArrayNewCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::Constant *F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
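
// An illustrative layout (assuming a typical 64-bit target with 8-byte size_t;
// exact sizes depend on the target):
//   new int[n]         -> 8-byte cookie holding n, data starts at offset 8
//   new long double[n] (16-byte aligned element)
//                      -> 16-byte cookie, n stored in its last 8 bytes,
//                         data starts at offset 16
// In both cases the element count is right-justified within the cookie.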

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::Constant *F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            Address allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  Address numElementsPtr
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());

  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                            GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_acquire",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_release",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
                                       llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_abort",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

namespace {
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
                             CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getQuantity());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
      // An inline variable's guard function is run from the per-TU
      // initialization function, not via a dedicated global ctor function, so
      // we can't put it in a comdat.
      if (!NonTemplateInline)
        CGF.CurFn->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
  }

  CGF.EmitBlock(EndBlock);
}

/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::Constant *dtor,
                                        llvm::Constant *addr,
                                        bool TLS) {
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
    fn->setDoesNotThrow();

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(dtor, dtorTy),
    llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
    handle
  };
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
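
// For illustration, for a file-scope `static Widget w;` (a hypothetical type
// with a non-trivial destructor), the generated initializer ends up calling
// roughly:
//   __cxa_atexit((void (*)(void *))&Widget::~Widget, &w, &__dso_handle);
// so the runtime can run the destructor at program exit or when this shared
// object is unloaded. For a dynamic TLS variable the same registration goes
// to _tlv_atexit on Darwin and to __cxa_thread_atexit elsewhere.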

void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto I : DtorsUsingAtExit) {
    int Priority = I.first;
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;

    // Create a function that registers destructors that have the same priority.
    //
    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    CodeGenFunction CGF(*this);
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
    llvm::Function *GlobalInitFn = CreateGlobalInitOrDestructFunction(
        FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
        SourceLocation());
    ASTContext &Ctx = getContext();
    FunctionDecl *FD = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
        &Ctx.Idents.get(GlobalInitFnName), Ctx.VoidTy, nullptr, SC_Static,
        false, false);
    CGF.StartFunction(GlobalDecl(FD), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());

    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit)
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      else
        CGF.registerGlobalDtorWithAtExit(Dtor);
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
  }
}
2339 
2340 /// Register a global destructor as best as we know how.
registerGlobalDtor(CodeGenFunction & CGF,const VarDecl & D,llvm::Constant * dtor,llvm::Constant * addr)2341 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
2342                                        const VarDecl &D,
2343                                        llvm::Constant *dtor,
2344                                        llvm::Constant *addr) {
2345   // Use __cxa_atexit if available.
2346   if (CGM.getCodeGenOpts().CXAAtExit)
2347     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2348 
2349   if (D.getTLSKind())
2350     CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
2351 
2352   // In Apple kexts, we want to add a global destructor entry.
2353   // FIXME: shouldn't this be guarded by some variable?
2354   if (CGM.getLangOpts().AppleKext) {
2355     // Generate a global destructor entry.
2356     return CGM.AddCXXDtorEntry(dtor, addr);
2357   }
2358 
2359   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2360 }
2361 
isThreadWrapperReplaceable(const VarDecl * VD,CodeGen::CodeGenModule & CGM)2362 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2363                                        CodeGen::CodeGenModule &CGM) {
2364   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2365   // Darwin prefers to have references to thread local variables to go through
2366   // the thread wrapper instead of directly referencing the backing variable.
2367   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2368          CGM.getTarget().getTriple().isOSDarwin();
2369 }
2370 
2371 /// Get the appropriate linkage for the wrapper function. This is essentially
2372 /// the weak form of the variable's linkage; every translation unit which needs
2373 /// the wrapper emits a copy, and we want the linker to merge them.
2374 static llvm::GlobalValue::LinkageTypes
2375 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2376   llvm::GlobalValue::LinkageTypes VarLinkage =
2377       CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
2378 
2379   // For internal linkage variables, we don't need an external or weak wrapper.
2380   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2381     return VarLinkage;
2382 
2383   // If the thread wrapper is replaceable, give it appropriate linkage.
2384   if (isThreadWrapperReplaceable(VD, CGM))
2385     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2386         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2387       return VarLinkage;
2388   return llvm::GlobalValue::WeakODRLinkage;
2389 }
2390 
2391 llvm::Function *
2392 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2393                                              llvm::Value *Val) {
2394   // Mangle the name for the thread_local wrapper function.
2395   SmallString<256> WrapperName;
2396   {
2397     llvm::raw_svector_ostream Out(WrapperName);
2398     getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2399   }
2400 
2401   // FIXME: If VD is a definition, we should regenerate the function attributes
2402   // before returning.
2403   if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2404     return cast<llvm::Function>(V);
2405 
2406   QualType RetQT = VD->getType();
2407   if (RetQT->isReferenceType())
2408     RetQT = RetQT.getNonReferenceType();
2409 
2410   const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2411       getContext().getPointerType(RetQT), FunctionArgList());
2412 
2413   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2414   llvm::Function *Wrapper =
2415       llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2416                              WrapperName.str(), &CGM.getModule());
2417 
2418   CGM.SetLLVMFunctionAttributes(nullptr, FI, Wrapper);
2419 
2420   if (VD->hasDefinition())
2421     CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2422 
2423   // Always resolve references to the wrapper at link time.
2424   if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
2425       !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
2426       !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
2427     Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2428 
2429   if (isThreadWrapperReplaceable(VD, CGM)) {
2430     Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2431     Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2432   }
2433   return Wrapper;
2434 }
2435 
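// Emit the per-variable thread wrappers and, where needed, the guarded
// __tls_init function.  As a sketch, for 'thread_local int x = f();' the
// wrapper generated below behaves roughly like (using the Itanium manglings
// _ZTW1x for the wrapper and _ZTH1x for the init function; illustrative, not
// the literal IR):
//
//   int *_ZTW1x() {            // thread wrapper
//     if (&_ZTH1x != nullptr)  // only needed when the init function may not exist
//       _ZTH1x();              // run dynamic initialization for this thread
//     return &x;
//   }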
2436 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2437     CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2438     ArrayRef<llvm::Function *> CXXThreadLocalInits,
2439     ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2440   llvm::Function *InitFunc = nullptr;
2441 
2442   // Separate initializers into those with ordered (or partially-ordered)
2443   // initialization and those with unordered initialization.
2444   llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2445   llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2446   for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2447     if (isTemplateInstantiation(
2448             CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2449       UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2450           CXXThreadLocalInits[I];
2451     else
2452       OrderedInits.push_back(CXXThreadLocalInits[I]);
2453   }
2454 
2455   if (!OrderedInits.empty()) {
2456     // Generate a guarded initialization function.
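    // (__tls_init tests and sets the thread-local byte __tls_guard so the
    // ordered initializers run at most once per thread; the guard check itself
    // is emitted by GenerateCXXGlobalInitFunc below.)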
2457     llvm::FunctionType *FTy =
2458         llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2459     const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2460     InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
2461                                                       SourceLocation(),
2462                                                       /*TLS=*/true);
2463     llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2464         CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2465         llvm::GlobalVariable::InternalLinkage,
2466         llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2467     Guard->setThreadLocal(true);
2468 
2469     CharUnits GuardAlign = CharUnits::One();
2470     Guard->setAlignment(GuardAlign.getQuantity());
2471 
2472     CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, OrderedInits,
2473                                                    Address(Guard, GuardAlign));
2474     // On Darwin platforms, use CXX_FAST_TLS calling convention.
2475     if (CGM.getTarget().getTriple().isOSDarwin()) {
2476       InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2477       InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2478     }
2479   }
2480 
2481   // Emit thread wrappers.
2482   for (const VarDecl *VD : CXXThreadLocals) {
2483     llvm::GlobalVariable *Var =
2484         cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2485     llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
2486 
2487     // Some targets require that all accesses to thread local variables go
2488     // through the thread wrapper.  If we do not also have a definition of the
2489     // variable, leave the wrapper as a declaration for another TU to define.
2490     if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition()) {
2491       Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2492       continue;
2493     }
2494 
2495     // Mangle the name for the thread_local initialization function.
2496     SmallString<256> InitFnName;
2497     {
2498       llvm::raw_svector_ostream Out(InitFnName);
2499       getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2500     }
2501 
2502     // If we have a definition for the variable, emit the initialization
2503     // function as an alias to the global Init function (if any). Otherwise,
2504     // produce a declaration of the initialization function.
2505     llvm::GlobalValue *Init = nullptr;
2506     bool InitIsInitFunc = false;
2507     if (VD->hasDefinition()) {
2508       InitIsInitFunc = true;
2509       llvm::Function *InitFuncToUse = InitFunc;
2510       if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2511         InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2512       if (InitFuncToUse)
2513         Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2514                                          InitFuncToUse);
2515     } else {
2516       // Emit a weak global function referring to the initialization function.
2517       // This function will not exist if the TU defining the thread_local
2518       // variable in question does not need any dynamic initialization for
2519       // its thread_local variables.
2520       llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2521       Init = llvm::Function::Create(FnTy,
2522                                     llvm::GlobalVariable::ExternalWeakLinkage,
2523                                     InitFnName.str(), &CGM.getModule());
2524       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2525       CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));
2526     }
2527 
2528     if (Init) {
2529       Init->setVisibility(Var->getVisibility());
2530       Init->setDSOLocal(Var->isDSOLocal());
2531     }
2532 
2533     llvm::LLVMContext &Context = CGM.getModule().getContext();
2534     llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2535     CGBuilderTy Builder(CGM, Entry);
2536     if (InitIsInitFunc) {
2537       if (Init) {
2538         llvm::CallInst *CallVal = Builder.CreateCall(Init);
2539         if (isThreadWrapperReplaceable(VD, CGM)) {
2540           CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2541           llvm::Function *Fn =
2542               cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2543           Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2544         }
2545       }
2546     } else {
2547       // Don't know whether we have an init function. Call it if it exists.
2548       llvm::Value *Have = Builder.CreateIsNotNull(Init);
2549       llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2550       llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2551       Builder.CreateCondBr(Have, InitBB, ExitBB);
2552 
2553       Builder.SetInsertPoint(InitBB);
2554       Builder.CreateCall(Init);
2555       Builder.CreateBr(ExitBB);
2556 
2557       Builder.SetInsertPoint(ExitBB);
2558     }
2559 
2560     // For a reference, the result of the wrapper function is a pointer to
2561     // the referenced object.
2562     llvm::Value *Val = Var;
2563     if (VD->getType()->isReferenceType()) {
2564       CharUnits Align = CGM.getContext().getDeclAlign(VD);
2565       Val = Builder.CreateAlignedLoad(Val, Align);
2566     }
2567     if (Val->getType() != Wrapper->getReturnType())
2568       Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2569           Val, Wrapper->getReturnType(), "");
2570     Builder.CreateRet(Val);
2571   }
2572 }
2573 
2574 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2575                                                    const VarDecl *VD,
2576                                                    QualType LValType) {
2577   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2578   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2579 
2580   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2581   CallVal->setCallingConv(Wrapper->getCallingConv());
2582 
2583   LValue LV;
2584   if (VD->getType()->isReferenceType())
2585     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2586   else
2587     LV = CGF.MakeAddrLValue(CallVal, LValType,
2588                             CGF.getContext().getDeclAlign(VD));
2589   // FIXME: need setObjCGCLValueClass?
2590   return LV;
2591 }
2592 
2593 /// Return whether the given global decl needs a VTT parameter, which it does
2594 /// if it's a base constructor or destructor with virtual bases.
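/// (The VTT -- virtual table table -- supplies the construction vtables that a
/// base-object constructor or destructor needs while the complete object is
/// only partially constructed.)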
2595 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2596   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2597 
2598   // If we don't have any virtual bases, just return early.
2599   if (!MD->getParent()->getNumVBases())
2600     return false;
2601 
2602   // Check if we have a base constructor.
2603   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2604     return true;
2605 
2606   // Check if we have a base destructor.
2607   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2608     return true;
2609 
2610   return false;
2611 }
2612 
2613 namespace {
2614 class ItaniumRTTIBuilder {
2615   CodeGenModule &CGM;  // Per-module state.
2616   llvm::LLVMContext &VMContext;
2617   const ItaniumCXXABI &CXXABI;  // Per-module state.
2618 
2619   /// Fields - The fields of the RTTI descriptor currently being built.
2620   SmallVector<llvm::Constant *, 16> Fields;
2621 
2622   /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2623   llvm::GlobalVariable *
2624   GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2625 
2626   /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2627   /// descriptor of the given type.
2628   llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2629 
2630   /// BuildVTablePointer - Build the vtable pointer for the given type.
2631   void BuildVTablePointer(const Type *Ty);
2632 
2633   /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2634   /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2635   void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2636 
2637   /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2638   /// classes with bases that do not satisfy the abi::__si_class_type_info
2639   /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
2640   void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2641 
2642   /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2643   /// for pointer types.
2644   void BuildPointerTypeInfo(QualType PointeeTy);
2645 
2646   /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2647   /// type_info for an object type.
2648   void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2649 
2650   /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2651   /// struct, used for member pointer types.
2652   void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2653 
2654 public:
2655   ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2656       : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2657 
2658   // Pointer type info flags.
2659   enum {
2660     /// PTI_Const - Type has const qualifier.
2661     PTI_Const = 0x1,
2662 
2663     /// PTI_Volatile - Type has volatile qualifier.
2664     PTI_Volatile = 0x2,
2665 
2666     /// PTI_Restrict - Type has restrict qualifier.
2667     PTI_Restrict = 0x4,
2668 
2669     /// PTI_Incomplete - Type is incomplete.
2670     PTI_Incomplete = 0x8,
2671 
2672     /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2673     /// (in pointer to member).
2674     PTI_ContainingClassIncomplete = 0x10,
2675 
2676     /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
2677     //PTI_TransactionSafe = 0x20,
2678 
2679     /// PTI_Noexcept - Pointee is noexcept function (C++1z).
2680     PTI_Noexcept = 0x40,
2681   };
2682 
2683   // VMI type info flags.
2684   enum {
2685     /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2686     VMI_NonDiamondRepeat = 0x1,
2687 
2688     /// VMI_DiamondShaped - Class is diamond shaped.
2689     VMI_DiamondShaped = 0x2
2690   };
2691 
2692   // Base class type info flags.
2693   enum {
2694     /// BCTI_Virtual - Base class is virtual.
2695     BCTI_Virtual = 0x1,
2696 
2697     /// BCTI_Public - Base class is public.
2698     BCTI_Public = 0x2
2699   };
2700 
2701   /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
2702   /// link to an existing RTTI descriptor if one already exists.
2703   llvm::Constant *BuildTypeInfo(QualType Ty);
2704 
2705   /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2706   llvm::Constant *BuildTypeInfo(
2707       QualType Ty,
2708       llvm::GlobalVariable::LinkageTypes Linkage,
2709       llvm::GlobalValue::VisibilityTypes Visibility,
2710       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
2711 };
2712 }
2713 
2714 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2715     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2716   SmallString<256> Name;
2717   llvm::raw_svector_ostream Out(Name);
2718   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2719 
2720   // We know that the mangled name of the type starts at index 4 of the
2721   // mangled name of the typename, so we can just index into it in order to
2722   // get the mangled name of the type.
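  // (For example, the type name symbol for a class A is "_ZTS1A"; skipping the
  // four-character "_ZTS" prefix leaves "1A", the mangling of the type itself.)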
2723   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2724                                                             Name.substr(4));
2725 
2726   llvm::GlobalVariable *GV =
2727     CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
2728 
2729   GV->setInitializer(Init);
2730 
2731   return GV;
2732 }
2733 
2734 llvm::Constant *
2735 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2736   // Mangle the RTTI name.
2737   SmallString<256> Name;
2738   llvm::raw_svector_ostream Out(Name);
2739   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2740 
2741   // Look for an existing global.
2742   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2743 
2744   if (!GV) {
2745     // Create a new global variable.
2746     // Note for the future: If we would ever like to do deferred emission of
2747     // RTTI, check if emitting vtables opportunistically needs any adjustment.
2748 
2749     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2750                                   /*Constant=*/true,
2751                                   llvm::GlobalValue::ExternalLinkage, nullptr,
2752                                   Name);
2753     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
2754     CGM.setGVProperties(GV, RD);
2755   }
2756 
2757   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2758 }
2759 
2760 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2761 /// info for that type is defined in the standard library.
2762 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2763   // Itanium C++ ABI 2.9.2:
2764   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
2765   //   the run-time support library. Specifically, the run-time support
2766   //   library should contain type_info objects for the types X, X* and
2767   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2768   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
2769   //   long, unsigned long, long long, unsigned long long, float, double,
2770   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
2771   //   half-precision floating point types.
2772   //
2773   // GCC also emits RTTI for __int128.
2774   // FIXME: We do not emit RTTI information for decimal types here.
2775 
2776   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
2777   switch (Ty->getKind()) {
2778     case BuiltinType::Void:
2779     case BuiltinType::NullPtr:
2780     case BuiltinType::Bool:
2781     case BuiltinType::WChar_S:
2782     case BuiltinType::WChar_U:
2783     case BuiltinType::Char_U:
2784     case BuiltinType::Char_S:
2785     case BuiltinType::UChar:
2786     case BuiltinType::SChar:
2787     case BuiltinType::Short:
2788     case BuiltinType::UShort:
2789     case BuiltinType::Int:
2790     case BuiltinType::UInt:
2791     case BuiltinType::Long:
2792     case BuiltinType::ULong:
2793     case BuiltinType::LongLong:
2794     case BuiltinType::ULongLong:
2795     case BuiltinType::Half:
2796     case BuiltinType::Float:
2797     case BuiltinType::Double:
2798     case BuiltinType::LongDouble:
2799     case BuiltinType::Float16:
2800     case BuiltinType::Float128:
2801     case BuiltinType::Char8:
2802     case BuiltinType::Char16:
2803     case BuiltinType::Char32:
2804     case BuiltinType::Int128:
2805     case BuiltinType::UInt128:
2806       return true;
2807 
2808 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2809     case BuiltinType::Id:
2810 #include "clang/Basic/OpenCLImageTypes.def"
2811     case BuiltinType::OCLSampler:
2812     case BuiltinType::OCLEvent:
2813     case BuiltinType::OCLClkEvent:
2814     case BuiltinType::OCLQueue:
2815     case BuiltinType::OCLReserveID:
2816     case BuiltinType::ShortAccum:
2817     case BuiltinType::Accum:
2818     case BuiltinType::LongAccum:
2819     case BuiltinType::UShortAccum:
2820     case BuiltinType::UAccum:
2821     case BuiltinType::ULongAccum:
2822     case BuiltinType::ShortFract:
2823     case BuiltinType::Fract:
2824     case BuiltinType::LongFract:
2825     case BuiltinType::UShortFract:
2826     case BuiltinType::UFract:
2827     case BuiltinType::ULongFract:
2828     case BuiltinType::SatShortAccum:
2829     case BuiltinType::SatAccum:
2830     case BuiltinType::SatLongAccum:
2831     case BuiltinType::SatUShortAccum:
2832     case BuiltinType::SatUAccum:
2833     case BuiltinType::SatULongAccum:
2834     case BuiltinType::SatShortFract:
2835     case BuiltinType::SatFract:
2836     case BuiltinType::SatLongFract:
2837     case BuiltinType::SatUShortFract:
2838     case BuiltinType::SatUFract:
2839     case BuiltinType::SatULongFract:
2840       return false;
2841 
2842     case BuiltinType::Dependent:
2843 #define BUILTIN_TYPE(Id, SingletonId)
2844 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2845     case BuiltinType::Id:
2846 #include "clang/AST/BuiltinTypes.def"
2847       llvm_unreachable("asking for RRTI for a placeholder type!");
2848 
2849     case BuiltinType::ObjCId:
2850     case BuiltinType::ObjCClass:
2851     case BuiltinType::ObjCSel:
2852       llvm_unreachable("FIXME: Objective-C types are unsupported!");
2853   }
2854 
2855   llvm_unreachable("Invalid BuiltinType Kind!");
2856 }
2857 
2858 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2859   QualType PointeeTy = PointerTy->getPointeeType();
2860   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2861   if (!BuiltinTy)
2862     return false;
2863 
2864   // Check the qualifiers.
2865   Qualifiers Quals = PointeeTy.getQualifiers();
2866   Quals.removeConst();
2867 
2868   if (!Quals.empty())
2869     return false;
2870 
2871   return TypeInfoIsInStandardLibrary(BuiltinTy);
2872 }
2873 
2874 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2875 /// information for the given type exists in the standard library.
2876 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2877   // Type info for builtin types is defined in the standard library.
2878   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2879     return TypeInfoIsInStandardLibrary(BuiltinTy);
2880 
2881   // Type info for some pointer types to builtin types is defined in the
2882   // standard library.
2883   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2884     return TypeInfoIsInStandardLibrary(PointerTy);
2885 
2886   return false;
2887 }
2888 
2889 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2890 /// the given type exists somewhere else, and we should not emit the type
2891 /// information in this translation unit.  Assumes that it is not a
2892 /// standard-library type.
2893 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2894                                             QualType Ty) {
2895   ASTContext &Context = CGM.getContext();
2896 
2897   // If RTTI is disabled, assume it might be disabled in the
2898   // translation unit that defines any potential key function, too.
2899   if (!Context.getLangOpts().RTTI) return false;
2900 
2901   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2902     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2903     if (!RD->hasDefinition())
2904       return false;
2905 
2906     if (!RD->isDynamicClass())
2907       return false;
2908 
2909     // FIXME: this may need to be reconsidered if the key function
2910     // changes.
2911     // N.B. We must always emit the RTTI data ourselves if there exists a key
2912     // function.
2913     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2914 
2915     // Don't import the RTTI but emit it locally.
2916     if (CGM.getTriple().isWindowsGNUEnvironment() && IsDLLImport)
2917       return false;
2918 
2919     if (CGM.getVTables().isVTableExternal(RD))
2920       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
2921                  ? false
2922                  : true;
2923 
2924     if (IsDLLImport)
2925       return true;
2926   }
2927 
2928   return false;
2929 }
2930 
2931 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2932 static bool IsIncompleteClassType(const RecordType *RecordTy) {
2933   return !RecordTy->getDecl()->isCompleteDefinition();
2934 }
2935 
2936 /// ContainsIncompleteClassType - Returns whether the given type contains an
2937 /// incomplete class type. This is true if
2938 ///
2939 ///   * The given type is an incomplete class type.
2940 ///   * The given type is a pointer type whose pointee type contains an
2941 ///     incomplete class type.
2942 ///   * The given type is a member pointer type whose class is an incomplete
2943 ///     class type.
2944 ///   * The given type is a member pointer type whose pointee type contains an
2945 ///     incomplete class type.
2946 /// In short: the type is, or points (directly or indirectly) to, an incomplete class type.
2947 static bool ContainsIncompleteClassType(QualType Ty) {
2948   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2949     if (IsIncompleteClassType(RecordTy))
2950       return true;
2951   }
2952 
2953   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2954     return ContainsIncompleteClassType(PointerTy->getPointeeType());
2955 
2956   if (const MemberPointerType *MemberPointerTy =
2957       dyn_cast<MemberPointerType>(Ty)) {
2958     // Check if the class type is incomplete.
2959     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
2960     if (IsIncompleteClassType(ClassType))
2961       return true;
2962 
2963     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
2964   }
2965 
2966   return false;
2967 }
2968 
2969 // CanUseSingleInheritance - Return whether the given record decl has a "single,
2970 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
2971 // iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
2972 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
2973   // Check the number of bases.
2974   if (RD->getNumBases() != 1)
2975     return false;
2976 
2977   // Get the base.
2978   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
2979 
2980   // Check that the base is not virtual.
2981   if (Base->isVirtual())
2982     return false;
2983 
2984   // Check that the base is public.
2985   if (Base->getAccessSpecifier() != AS_public)
2986     return false;
2987 
2988   // Check that the class is dynamic iff the base is.
2989   const CXXRecordDecl *BaseDecl =
2990     cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
2991   if (!BaseDecl->isEmpty() &&
2992       BaseDecl->isDynamicClass() != RD->isDynamicClass())
2993     return false;
2994 
2995   return true;
2996 }
2997 
2998 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
2999   // abi::__class_type_info.
3000   static const char * const ClassTypeInfo =
3001     "_ZTVN10__cxxabiv117__class_type_infoE";
3002   // abi::__si_class_type_info.
3003   static const char * const SIClassTypeInfo =
3004     "_ZTVN10__cxxabiv120__si_class_type_infoE";
3005   // abi::__vmi_class_type_info.
3006   static const char * const VMIClassTypeInfo =
3007     "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3008 
3009   const char *VTableName = nullptr;
3010 
3011   switch (Ty->getTypeClass()) {
3012 #define TYPE(Class, Base)
3013 #define ABSTRACT_TYPE(Class, Base)
3014 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3015 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3016 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3017 #include "clang/AST/TypeNodes.def"
3018     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3019 
3020   case Type::LValueReference:
3021   case Type::RValueReference:
3022     llvm_unreachable("References shouldn't get here");
3023 
3024   case Type::Auto:
3025   case Type::DeducedTemplateSpecialization:
3026     llvm_unreachable("Undeduced type shouldn't get here");
3027 
3028   case Type::Pipe:
3029     llvm_unreachable("Pipe types shouldn't get here");
3030 
3031   case Type::Builtin:
3032   // GCC treats vector and complex types as fundamental types.
3033   case Type::Vector:
3034   case Type::ExtVector:
3035   case Type::Complex:
3036   case Type::Atomic:
3037   // FIXME: GCC treats block pointers as fundamental types?!
3038   case Type::BlockPointer:
3039     // abi::__fundamental_type_info.
3040     VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3041     break;
3042 
3043   case Type::ConstantArray:
3044   case Type::IncompleteArray:
3045   case Type::VariableArray:
3046     // abi::__array_type_info.
3047     VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3048     break;
3049 
3050   case Type::FunctionNoProto:
3051   case Type::FunctionProto:
3052     // abi::__function_type_info.
3053     VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3054     break;
3055 
3056   case Type::Enum:
3057     // abi::__enum_type_info.
3058     VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3059     break;
3060 
3061   case Type::Record: {
3062     const CXXRecordDecl *RD =
3063       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3064 
3065     if (!RD->hasDefinition() || !RD->getNumBases()) {
3066       VTableName = ClassTypeInfo;
3067     } else if (CanUseSingleInheritance(RD)) {
3068       VTableName = SIClassTypeInfo;
3069     } else {
3070       VTableName = VMIClassTypeInfo;
3071     }
3072 
3073     break;
3074   }
3075 
3076   case Type::ObjCObject:
3077     // Ignore protocol qualifiers.
3078     Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3079 
3080     // Handle id and Class.
3081     if (isa<BuiltinType>(Ty)) {
3082       VTableName = ClassTypeInfo;
3083       break;
3084     }
3085 
3086     assert(isa<ObjCInterfaceType>(Ty));
3087     // Fall through.
3088 
3089   case Type::ObjCInterface:
3090     if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3091       VTableName = SIClassTypeInfo;
3092     } else {
3093       VTableName = ClassTypeInfo;
3094     }
3095     break;
3096 
3097   case Type::ObjCObjectPointer:
3098   case Type::Pointer:
3099     // abi::__pointer_type_info.
3100     VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3101     break;
3102 
3103   case Type::MemberPointer:
3104     // abi::__pointer_to_member_type_info.
3105     VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3106     break;
3107   }
3108 
3109   llvm::Constant *VTable =
3110     CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3111   CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3112 
3113   llvm::Type *PtrDiffTy =
3114     CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3115 
3116   // The vtable address point is 2 (pointers past the start of the vtable).
3117   llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3118   VTable =
3119       llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
3120   VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3121 
3122   Fields.push_back(VTable);
3123 }
3124 
3125 /// Return the linkage that the type info and type info name constants
3126 /// should have for the given type.
3127 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3128                                                              QualType Ty) {
3129   // Itanium C++ ABI 2.9.5p7:
3130   //   In addition, it and all of the intermediate abi::__pointer_type_info
3131   //   structs in the chain down to the abi::__class_type_info for the
3132   //   incomplete class type must be prevented from resolving to the
3133   //   corresponding type_info structs for the complete class type, possibly
3134   //   by making them local static objects. Finally, a dummy class RTTI is
3135   //   generated for the incomplete type that will not resolve to the final
3136   //   complete class RTTI (because the latter need not exist), possibly by
3137   //   making it a local static object.
3138   if (ContainsIncompleteClassType(Ty))
3139     return llvm::GlobalValue::InternalLinkage;
3140 
3141   switch (Ty->getLinkage()) {
3142   case NoLinkage:
3143   case InternalLinkage:
3144   case UniqueExternalLinkage:
3145     return llvm::GlobalValue::InternalLinkage;
3146 
3147   case VisibleNoLinkage:
3148   case ModuleInternalLinkage:
3149   case ModuleLinkage:
3150   case ExternalLinkage:
3151     // If RTTI is not enabled, this type info struct is only being used for
3152     // exception handling. Give it linkonce_odr linkage.
3153     if (!CGM.getLangOpts().RTTI)
3154       return llvm::GlobalValue::LinkOnceODRLinkage;
3155 
3156     if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3157       const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3158       if (RD->hasAttr<WeakAttr>())
3159         return llvm::GlobalValue::WeakODRLinkage;
3160       if (CGM.getTriple().isWindowsItaniumEnvironment())
3161         if (RD->hasAttr<DLLImportAttr>() &&
3162             ShouldUseExternalRTTIDescriptor(CGM, Ty))
3163           return llvm::GlobalValue::ExternalLinkage;
3164       // MinGW always uses LinkOnceODRLinkage for type info.
3165       if (RD->isDynamicClass() &&
3166           !CGM.getContext()
3167                .getTargetInfo()
3168                .getTriple()
3169                .isWindowsGNUEnvironment())
3170         return CGM.getVTableLinkage(RD);
3171     }
3172 
3173     return llvm::GlobalValue::LinkOnceODRLinkage;
3174   }
3175 
3176   llvm_unreachable("Invalid linkage!");
3177 }
3178 
3179 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3180   // We want to operate on the canonical type.
3181   Ty = Ty.getCanonicalType();
3182 
3183   // Check if we've already emitted an RTTI descriptor for this type.
3184   SmallString<256> Name;
3185   llvm::raw_svector_ostream Out(Name);
3186   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3187 
3188   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3189   if (OldGV && !OldGV->isDeclaration()) {
3190     assert(!OldGV->hasAvailableExternallyLinkage() &&
3191            "available_externally typeinfos not yet implemented");
3192 
3193     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3194   }
3195 
3196   // Check if there is already an external RTTI descriptor for this type.
3197   if (IsStandardLibraryRTTIDescriptor(Ty) ||
3198       ShouldUseExternalRTTIDescriptor(CGM, Ty))
3199     return GetAddrOfExternalRTTIDescriptor(Ty);
3200 
3201   // Compute the linkage to use for the type_info object and its name.
3202   llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3203 
3204   // Give the type_info object and name the formal visibility of the
3205   // type itself.
3206   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3207   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3208     // If the linkage is local, only default visibility makes sense.
3209     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3210   else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3211            ItaniumCXXABI::RUK_NonUniqueHidden)
3212     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3213   else
3214     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3215 
3216   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3217       llvm::GlobalValue::DefaultStorageClass;
3218   if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3219     auto RD = Ty->getAsCXXRecordDecl();
3220     if (RD && RD->hasAttr<DLLExportAttr>())
3221       DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3222   }
3223 
3224   return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3225 }
3226 
3227 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3228       QualType Ty,
3229       llvm::GlobalVariable::LinkageTypes Linkage,
3230       llvm::GlobalValue::VisibilityTypes Visibility,
3231       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3232   // Add the vtable pointer.
3233   BuildVTablePointer(cast<Type>(Ty));
3234 
3235   // And the name.
3236   llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3237   llvm::Constant *TypeNameField;
3238 
3239   // If we're supposed to demote the visibility, be sure to set a flag
3240   // to use a string comparison for type_info comparisons.
3241   ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3242       CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3243   if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3244     // The flag is the sign bit, which on ARM64 is defined to be clear
3245     // for global pointers.  This is very ARM64-specific.
3246     TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3247     llvm::Constant *flag =
3248         llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3249     TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3250     TypeNameField =
3251         llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3252   } else {
3253     TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3254   }
3255   Fields.push_back(TypeNameField);
3256 
3257   switch (Ty->getTypeClass()) {
3258 #define TYPE(Class, Base)
3259 #define ABSTRACT_TYPE(Class, Base)
3260 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3261 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3262 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3263 #include "clang/AST/TypeNodes.def"
3264     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3265 
3266   // GCC treats vector types as fundamental types.
3267   case Type::Builtin:
3268   case Type::Vector:
3269   case Type::ExtVector:
3270   case Type::Complex:
3271   case Type::BlockPointer:
3272     // Itanium C++ ABI 2.9.5p4:
3273     // abi::__fundamental_type_info adds no data members to std::type_info.
3274     break;
3275 
3276   case Type::LValueReference:
3277   case Type::RValueReference:
3278     llvm_unreachable("References shouldn't get here");
3279 
3280   case Type::Auto:
3281   case Type::DeducedTemplateSpecialization:
3282     llvm_unreachable("Undeduced type shouldn't get here");
3283 
3284   case Type::Pipe:
3285     llvm_unreachable("Pipe type shouldn't get here");
3286 
3287   case Type::ConstantArray:
3288   case Type::IncompleteArray:
3289   case Type::VariableArray:
3290     // Itanium C++ ABI 2.9.5p5:
3291     // abi::__array_type_info adds no data members to std::type_info.
3292     break;
3293 
3294   case Type::FunctionNoProto:
3295   case Type::FunctionProto:
3296     // Itanium C++ ABI 2.9.5p5:
3297     // abi::__function_type_info adds no data members to std::type_info.
3298     break;
3299 
3300   case Type::Enum:
3301     // Itanium C++ ABI 2.9.5p5:
3302     // abi::__enum_type_info adds no data members to std::type_info.
3303     break;
3304 
3305   case Type::Record: {
3306     const CXXRecordDecl *RD =
3307       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3308     if (!RD->hasDefinition() || !RD->getNumBases()) {
3309       // We don't need to emit any fields.
3310       break;
3311     }
3312 
3313     if (CanUseSingleInheritance(RD))
3314       BuildSIClassTypeInfo(RD);
3315     else
3316       BuildVMIClassTypeInfo(RD);
3317 
3318     break;
3319   }
3320 
3321   case Type::ObjCObject:
3322   case Type::ObjCInterface:
3323     BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3324     break;
3325 
3326   case Type::ObjCObjectPointer:
3327     BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3328     break;
3329 
3330   case Type::Pointer:
3331     BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3332     break;
3333 
3334   case Type::MemberPointer:
3335     BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3336     break;
3337 
3338   case Type::Atomic:
3339     // No fields, at least for the moment.
3340     break;
3341   }
3342 
3343   llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3344 
3345   SmallString<256> Name;
3346   llvm::raw_svector_ostream Out(Name);
3347   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3348   llvm::Module &M = CGM.getModule();
3349   llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3350   llvm::GlobalVariable *GV =
3351       new llvm::GlobalVariable(M, Init->getType(),
3352                                /*Constant=*/true, Linkage, Init, Name);
3353 
3354   // If there's already an old global variable, replace it with the new one.
3355   if (OldGV) {
3356     GV->takeName(OldGV);
3357     llvm::Constant *NewPtr =
3358       llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3359     OldGV->replaceAllUsesWith(NewPtr);
3360     OldGV->eraseFromParent();
3361   }
3362 
3363   if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3364     GV->setComdat(M.getOrInsertComdat(GV->getName()));
3365 
3366   // The Itanium ABI specifies that type_info objects must be globally
3367   // unique, with one exception: if the type is an incomplete class
3368   // type or a (possibly indirect) pointer to one.  That exception
3369   // affects the general case of comparing type_info objects produced
3370   // by the typeid operator, which is why the comparison operators on
3371   // std::type_info generally use the type_info name pointers instead
3372   // of the object addresses.  However, the language's built-in uses
3373   // of RTTI generally require class types to be complete, even when
3374   // manipulating pointers to those class types.  This allows the
3375   // implementation of dynamic_cast to rely on address equality tests,
3376   // which is much faster.
3377 
3378   // All of this is to say that it's important that both the type_info
3379   // object and the type_info name be uniqued when weakly emitted.
3380 
3381   TypeName->setVisibility(Visibility);
3382   CGM.setDSOLocal(TypeName);
3383 
3384   GV->setVisibility(Visibility);
3385   CGM.setDSOLocal(GV);
3386 
3387   TypeName->setDLLStorageClass(DLLStorageClass);
3388   GV->setDLLStorageClass(DLLStorageClass);
3389 
3390   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3391 }
3392 
3393 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3394 /// for the given Objective-C object type.
3395 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3396   // Drop qualifiers.
3397   const Type *T = OT->getBaseType().getTypePtr();
3398   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3399 
3400   // The builtin types are abi::__class_type_infos and don't require
3401   // extra fields.
3402   if (isa<BuiltinType>(T)) return;
3403 
3404   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3405   ObjCInterfaceDecl *Super = Class->getSuperClass();
3406 
3407   // Root classes are also __class_type_info.
3408   if (!Super) return;
3409 
3410   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3411 
3412   // Everything else is single inheritance.
3413   llvm::Constant *BaseTypeInfo =
3414       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3415   Fields.push_back(BaseTypeInfo);
3416 }
3417 
3418 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3419 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3420 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3421   // Itanium C++ ABI 2.9.5p6b:
3422   // It adds to abi::__class_type_info a single member pointing to the
3423   // type_info structure for the base type,
3424   llvm::Constant *BaseTypeInfo =
3425     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3426   Fields.push_back(BaseTypeInfo);
3427 }
3428 
3429 namespace {
3430   /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3431   /// a class hierarchy.
3432   struct SeenBases {
3433     llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3434     llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3435   };
3436 }
3437 
3438 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3439 /// abi::__vmi_class_type_info.
3440 ///
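/// For example (illustrative): with 'struct A {}; struct B : virtual A {};
/// struct C : virtual A {}; struct D : B, C {};', D sees the virtual base A
/// twice, so its flags include VMI_DiamondShaped; repeating a non-virtual base
/// instead would set VMI_NonDiamondRepeat.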
3441 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3442                                              SeenBases &Bases) {
3443 
3444   unsigned Flags = 0;
3445 
3446   const CXXRecordDecl *BaseDecl =
3447     cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3448 
3449   if (Base->isVirtual()) {
3450     // Mark the virtual base as seen.
3451     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3452       // If this virtual base has been seen before, then the class is diamond
3453       // shaped.
3454       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3455     } else {
3456       if (Bases.NonVirtualBases.count(BaseDecl))
3457         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3458     }
3459   } else {
3460     // Mark the non-virtual base as seen.
3461     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3462       // If this non-virtual base has been seen before, then the class has non-
3463       // diamond shaped repeated inheritance.
3464       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3465     } else {
3466       if (Bases.VirtualBases.count(BaseDecl))
3467         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3468     }
3469   }
3470 
3471   // Walk all bases.
3472   for (const auto &I : BaseDecl->bases())
3473     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3474 
3475   return Flags;
3476 }
3477 
3478 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3479   unsigned Flags = 0;
3480   SeenBases Bases;
3481 
3482   // Walk all bases.
3483   for (const auto &I : RD->bases())
3484     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3485 
3486   return Flags;
3487 }
3488 
3489 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3490 /// classes with bases that do not satisfy the abi::__si_class_type_info
3491 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3492 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3493   llvm::Type *UnsignedIntLTy =
3494     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3495 
3496   // Itanium C++ ABI 2.9.5p6c:
3497   //   __flags is a word with flags describing details about the class
3498   //   structure, which may be referenced by using the __flags_masks
3499   //   enumeration. These flags refer to both direct and indirect bases.
3500   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3501   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3502 
3503   // Itanium C++ ABI 2.9.5p6c:
3504   //   __base_count is a word with the number of direct proper base class
3505   //   descriptions that follow.
3506   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3507 
3508   if (!RD->getNumBases())
3509     return;
3510 
3511   // Now add the base class descriptions.
3512 
3513   // Itanium C++ ABI 2.9.5p6c:
3514   //   __base_info[] is an array of base class descriptions -- one for every
3515   //   direct proper base. Each description is of the type:
3516   //
3517   //   struct abi::__base_class_type_info {
3518   //   public:
3519   //     const __class_type_info *__base_type;
3520   //     long __offset_flags;
3521   //
3522   //     enum __offset_flags_masks {
3523   //       __virtual_mask = 0x1,
3524   //       __public_mask = 0x2,
3525   //       __offset_shift = 8
3526   //     };
3527   //   };
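  //
  //   For example (illustrative), a public non-virtual base laid out at byte
  //   offset 16 gets __offset_flags == (16 << __offset_shift) | __public_mask
  //   == 0x1002.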
3528 
3529   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3530   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3531   // LLP64 platforms.
3532   // FIXME: Consider updating libc++abi to match, and extend this logic to all
3533   // LLP64 platforms.
3534   QualType OffsetFlagsTy = CGM.getContext().LongTy;
3535   const TargetInfo &TI = CGM.getContext().getTargetInfo();
3536   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3537     OffsetFlagsTy = CGM.getContext().LongLongTy;
3538   llvm::Type *OffsetFlagsLTy =
3539       CGM.getTypes().ConvertType(OffsetFlagsTy);
3540 
3541   for (const auto &Base : RD->bases()) {
3542     // The __base_type member points to the RTTI for the base type.
3543     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3544 
3545     const CXXRecordDecl *BaseDecl =
3546       cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3547 
3548     int64_t OffsetFlags = 0;
3549 
3550     // All but the lower 8 bits of __offset_flags are a signed offset.
3551     // For a non-virtual base, this is the offset in the object of the base
3552     // subobject. For a virtual base, this is the offset in the virtual table of
3553     // the virtual base offset for the virtual base referenced (negative).
3554     CharUnits Offset;
3555     if (Base.isVirtual())
3556       Offset =
3557         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3558     else {
3559       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3560       Offset = Layout.getBaseClassOffset(BaseDecl);
3561     }
3562 
3563     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3564 
3565     // The low-order byte of __offset_flags contains flags, as given by the
3566     // masks from the enumeration __offset_flags_masks.
3567     if (Base.isVirtual())
3568       OffsetFlags |= BCTI_Virtual;
3569     if (Base.getAccessSpecifier() == AS_public)
3570       OffsetFlags |= BCTI_Public;
3571 
3572     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3573   }
3574 }
3575 
3576 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3577 /// pieces from \p Type.
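/// For example (illustrative), given 'const volatile X' this returns
/// PTI_Const | PTI_Volatile (0x3) and strips Type down to 'X'.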
3578 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3579   unsigned Flags = 0;
3580 
3581   if (Type.isConstQualified())
3582     Flags |= ItaniumRTTIBuilder::PTI_Const;
3583   if (Type.isVolatileQualified())
3584     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3585   if (Type.isRestrictQualified())
3586     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3587   Type = Type.getUnqualifiedType();
3588 
3589   // Itanium C++ ABI 2.9.5p7:
3590   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
3591   //   incomplete class type, the incomplete target type flag is set.
3592   if (ContainsIncompleteClassType(Type))
3593     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3594 
3595   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3596     if (Proto->isNothrow()) {
3597       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3598       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
3599     }
3600   }
3601 
3602   return Flags;
3603 }
3604 
3605 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3606 /// used for pointer types.
3607 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3608   // Itanium C++ ABI 2.9.5p7:
3609   //   __flags is a flag word describing the cv-qualification and other
3610   //   attributes of the type pointed to
3611   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3612 
3613   llvm::Type *UnsignedIntLTy =
3614     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3615   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3616 
3617   // Itanium C++ ABI 2.9.5p7:
3618   //  __pointee is a pointer to the std::type_info derivation for the
3619   //  unqualified type being pointed to.
3620   llvm::Constant *PointeeTypeInfo =
3621       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3622   Fields.push_back(PointeeTypeInfo);
3623 }
3624 
3625 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3626 /// struct, used for member pointer types.
3627 void
3628 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3629   QualType PointeeTy = Ty->getPointeeType();
3630 
3631   // Itanium C++ ABI 2.9.5p7:
3632   //   __flags is a flag word describing the cv-qualification and other
3633   //   attributes of the type pointed to.
3634   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3635 
3636   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3637   if (IsIncompleteClassType(ClassType))
3638     Flags |= PTI_ContainingClassIncomplete;
3639 
3640   llvm::Type *UnsignedIntLTy =
3641     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3642   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3643 
3644   // Itanium C++ ABI 2.9.5p7:
3645   //   __pointee is a pointer to the std::type_info derivation for the
3646   //   unqualified type being pointed to.
3647   llvm::Constant *PointeeTypeInfo =
3648       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3649   Fields.push_back(PointeeTypeInfo);
3650 
3651   // Itanium C++ ABI 2.9.5p9:
3652   //   __context is a pointer to an abi::__class_type_info corresponding to the
3653   //   class type containing the member pointed to
3654   //   (e.g., the "A" in "int A::*").
3655   Fields.push_back(
3656       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
3657 }
3658 
3659 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3660   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
3661 }
3662 
3663 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
3664   // Types added here must also be added to TypeInfoIsInStandardLibrary.
3665   QualType FundamentalTypes[] = {
3666       getContext().VoidTy,             getContext().NullPtrTy,
3667       getContext().BoolTy,             getContext().WCharTy,
3668       getContext().CharTy,             getContext().UnsignedCharTy,
3669       getContext().SignedCharTy,       getContext().ShortTy,
3670       getContext().UnsignedShortTy,    getContext().IntTy,
3671       getContext().UnsignedIntTy,      getContext().LongTy,
3672       getContext().UnsignedLongTy,     getContext().LongLongTy,
3673       getContext().UnsignedLongLongTy, getContext().Int128Ty,
3674       getContext().UnsignedInt128Ty,   getContext().HalfTy,
3675       getContext().FloatTy,            getContext().DoubleTy,
3676       getContext().LongDoubleTy,       getContext().Float128Ty,
3677       getContext().Char8Ty,            getContext().Char16Ty,
3678       getContext().Char32Ty
3679   };
3680   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3681       RD->hasAttr<DLLExportAttr>()
3682       ? llvm::GlobalValue::DLLExportStorageClass
3683       : llvm::GlobalValue::DefaultStorageClass;
3684   llvm::GlobalValue::VisibilityTypes Visibility =
3685       CodeGenModule::GetLLVMVisibility(RD->getVisibility());
3686   for (const QualType &FundamentalType : FundamentalTypes) {
3687     QualType PointerType = getContext().getPointerType(FundamentalType);
3688     QualType PointerTypeConst = getContext().getPointerType(
3689         FundamentalType.withConst());
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage,
          Visibility, DLLStorageClass);
  }
}

/// What sort of uniqueness rules should we use for the RTTI for the
/// given type?
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
  if (shouldRTTIBeUnique())
    return RUK_Unique;

  // It's only necessary for linkonce_odr or weak_odr linkage.
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
      Linkage != llvm::GlobalValue::WeakODRLinkage)
    return RUK_Unique;

  // It's only necessary with default visibility.
  if (CanTy->getVisibility() != DefaultVisibility)
    return RUK_Unique;

  // If we're not required to publish this symbol, hide it.
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    return RUK_NonUniqueHidden;

  // If we're required to publish this symbol, as we might be under an
  // explicit instantiation, leave it with default visibility but
  // enable string-comparisons.
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
  return RUK_NonUniqueVisible;
}

// Find out how to codegen the complete destructor and constructor
namespace {
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
                                       const CXXMethodDecl *MD) {
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
    return StructorCodegen::Emit;

  // The complete and base structors are not equivalent if there are any
  // virtual bases, so emit separate functions.
  if (MD->getParent()->getNumVBases())
    return StructorCodegen::Emit;

  GlobalDecl AliasDecl;
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
  } else {
    const auto *CD = cast<CXXConstructorDecl>(MD);
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
  }
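  // Note that when aliasing applies it is the complete structor (C1/D1) that
  // becomes an alias of (or is RAUW'd to) the base structor (C2/D2); see
  // emitCXXStructor below.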
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    return StructorCodegen::RAUW;

  // FIXME: Should we allow available_externally aliases?
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
    return StructorCodegen::RAUW;

  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
        CGM.getTarget().getTriple().isOSBinFormatWasm())
      return StructorCodegen::COMDAT;
    return StructorCodegen::Emit;
  }

  return StructorCodegen::Alias;
}

static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}

void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
                                    StructorType Type) {
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  if (Type == StructorType::Complete) {
    GlobalDecl CompleteDecl;
    GlobalDecl BaseDecl;
    if (CD) {
      CompleteDecl = GlobalDecl(CD, Ctor_Complete);
      BaseDecl = GlobalDecl(CD, Ctor_Base);
    } else {
      CompleteDecl = GlobalDecl(DD, Dtor_Complete);
      BaseDecl = GlobalDecl(DD, Dtor_Base);
    }

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      StringRef MangledName = CGM.getMangledName(CompleteDecl);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
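  // For example, given 'struct B { ~B(); }; struct D : B { ~D() {} };', D's
  // base object destructor can typically be emitted as an alias of B's.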
  if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);

  if (CGType == StructorCodegen::COMDAT) {
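    // The comdat is keyed on the C5/D5 (unified structor) mangling, so the
    // structor variants emitted for this declaration land in the same group
    // and the linker keeps a single copy of them.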
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}

static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
  // void *__cxa_begin_catch(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}

static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
  // void __cxa_end_catch();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}

static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
  // void *__cxa_get_exception_ptr(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}

namespace {
  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
  /// exception type lets us state definitively that the thrown exception
  /// type does not have a destructor.  In particular:
  ///   - Catch-alls tell us nothing, so we have to conservatively
  ///     assume that the thrown exception might have a destructor.
  ///   - Catches by reference behave according to their base types.
  ///   - Catches of non-record types will only trigger for exceptions
  ///     of non-record types, which never have destructors.
  ///   - Catches of record types can trigger for arbitrary subclasses
  ///     of the caught type, so we have to assume the actual thrown
  ///     exception type might have a throwing destructor, even if the
  ///     caught type's destructor is trivial or nothrow.
  struct CallEndCatch final : EHScopeStack::Cleanup {
    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
    bool MightThrow;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (!MightThrow) {
        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
        return;
      }

      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
    }
  };
}

/// Emits a call to __cxa_begin_catch and enters a cleanup to call
/// __cxa_end_catch.
///
/// \param EndMightThrow - true if __cxa_end_catch might throw
static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
                                   llvm::Value *Exn,
                                   bool EndMightThrow) {
  llvm::CallInst *call =
    CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);

  CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);

  return call;
}

/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy =
          cast<llvm::PointerType>(LLVMCatchTy)->getElementType();

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        // fallthrough

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}

/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                   const CXXCatchStmt *S) {
  // We have to be very careful with the ordering of cleanups here:
  //   C++ [except.throw]p4:
  //     The destruction [of the exception temporary] occurs
  //     immediately after the destruction of the object declared in
  //     the exception-declaration in the handler.
  //
  // So the precise ordering is:
  //   1.  Construct catch variable.
  //   2.  __cxa_begin_catch
  //   3.  Enter __cxa_end_catch cleanup
  //   4.  Enter dtor cleanup
  //
  // We do this by using a slightly abnormal initialization process.
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //     - EmitAutoVarAlloca creates the variable and debug info
  //       - InitCatchParam initializes the variable from the exception
  //       - CallBeginCatch calls __cxa_begin_catch
  //       - CallBeginCatch enters the __cxa_end_catch cleanup
  //     - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt closes the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  if (!CatchParam) {
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
  CGF.EmitAutoVarCleanups(var);
}

/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
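///
/// When this translation unit ends up providing the definition, the emitted
/// body simply calls __cxa_begin_catch on the exception pointer, then
/// std::terminate(), and ends in 'unreachable'.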
static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
  llvm::FunctionType *fnTy =
    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
  llvm::Constant *fnRef = CGM.CreateRuntimeFunction(
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);

  llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
  if (fn && fn->empty()) {
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely.  The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
      llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }

  return fnRef;
}

llvm::CallInst *
ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                   llvm::Value *Exn) {
  // In C++, we want to call __cxa_begin_catch() before terminating.
  if (Exn) {
    assert(CGF.CGM.getLangOpts().CPlusPlus);
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
  }
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}

std::pair<llvm::Value *, const CXXRecordDecl *>
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
                             const CXXRecordDecl *RD) {
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}

void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                       const CXXCatchStmt *C) {
  if (CGF.getTarget().hasFeature("exception-handling"))
    CGF.EHStack.pushCleanup<CatchRetScope>(
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
  ItaniumCXXABI::emitBeginCatch(CGF, C);
}
