1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI. The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 // https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 // https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
17 //
18 //===----------------------------------------------------------------------===//
19
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
38
39 using namespace clang;
40 using namespace CodeGen;
41
42 namespace {
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44 /// VTables - All the vtables which have been defined.
45 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46
47 /// All the thread wrapper functions that have been used.
48 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49 ThreadWrappers;
50
51 protected:
52 bool UseARMMethodPtrABI;
53 bool UseARMGuardVarABI;
54 bool Use32BitVTableOffsetABI;
55
getMangleContext()56 ItaniumMangleContext &getMangleContext() {
57 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58 }
59
60 public:
ItaniumCXXABI(CodeGen::CodeGenModule & CGM,bool UseARMMethodPtrABI=false,bool UseARMGuardVarABI=false)61 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62 bool UseARMMethodPtrABI = false,
63 bool UseARMGuardVarABI = false) :
64 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65 UseARMGuardVarABI(UseARMGuardVarABI),
66 Use32BitVTableOffsetABI(false) { }
67
68 bool classifyReturnType(CGFunctionInfo &FI) const override;
69
getRecordArgABI(const CXXRecordDecl * RD) const70 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71 // If C++ prohibits us from making a copy, pass by address.
72 if (!RD->canPassInRegisters())
73 return RAA_Indirect;
74 return RAA_Default;
75 }
76
isThisCompleteObject(GlobalDecl GD) const77 bool isThisCompleteObject(GlobalDecl GD) const override {
78 // The Itanium ABI has separate complete-object vs. base-object
79 // variants of both constructors and destructors.
80 if (isa<CXXDestructorDecl>(GD.getDecl())) {
81 switch (GD.getDtorType()) {
82 case Dtor_Complete:
83 case Dtor_Deleting:
84 return true;
85
86 case Dtor_Base:
87 return false;
88
89 case Dtor_Comdat:
90 llvm_unreachable("emitting dtor comdat as function?");
91 }
92 llvm_unreachable("bad dtor kind");
93 }
94 if (isa<CXXConstructorDecl>(GD.getDecl())) {
95 switch (GD.getCtorType()) {
96 case Ctor_Complete:
97 return true;
98
99 case Ctor_Base:
100 return false;
101
102 case Ctor_CopyingClosure:
103 case Ctor_DefaultClosure:
104 llvm_unreachable("closure ctors in Itanium ABI?");
105
106 case Ctor_Comdat:
107 llvm_unreachable("emitting ctor comdat as function?");
108 }
109 llvm_unreachable("bad dtor kind");
110 }
111
112 // No other kinds.
113 return false;
114 }
115
116 bool isZeroInitializable(const MemberPointerType *MPT) override;
117
118 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119
120 CGCallee
121 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122 const Expr *E,
123 Address This,
124 llvm::Value *&ThisPtrForCall,
125 llvm::Value *MemFnPtr,
126 const MemberPointerType *MPT) override;
127
128 llvm::Value *
129 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130 Address Base,
131 llvm::Value *MemPtr,
132 const MemberPointerType *MPT) override;
133
134 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135 const CastExpr *E,
136 llvm::Value *Src) override;
137 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138 llvm::Constant *Src) override;
139
140 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141
142 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144 CharUnits offset) override;
145 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147 CharUnits ThisAdjustment);
148
149 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150 llvm::Value *L, llvm::Value *R,
151 const MemberPointerType *MPT,
152 bool Inequality) override;
153
154 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155 llvm::Value *Addr,
156 const MemberPointerType *MPT) override;
157
158 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159 Address Ptr, QualType ElementType,
160 const CXXDestructorDecl *Dtor) override;
161
162 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164
165 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166
167 llvm::CallInst *
168 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169 llvm::Value *Exn) override;
170
171 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173 CatchTypeInfo
getAddrOfCXXCatchHandlerType(QualType Ty,QualType CatchHandlerType)174 getAddrOfCXXCatchHandlerType(QualType Ty,
175 QualType CatchHandlerType) override {
176 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177 }
178
179 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182 Address ThisPtr,
183 llvm::Type *StdTypeInfoPtrTy) override;
184
185 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186 QualType SrcRecordTy) override;
187
188 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189 QualType SrcRecordTy, QualType DestTy,
190 QualType DestRecordTy,
191 llvm::BasicBlock *CastEnd) override;
192
193 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194 QualType SrcRecordTy,
195 QualType DestTy) override;
196
197 bool EmitBadCastCall(CodeGenFunction &CGF) override;
198
199 llvm::Value *
200 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201 const CXXRecordDecl *ClassDecl,
202 const CXXRecordDecl *BaseClassDecl) override;
203
204 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205
206 AddedStructorArgCounts
207 buildStructorSignature(GlobalDecl GD,
208 SmallVectorImpl<CanQualType> &ArgTys) override;
209
useThunkForDtorVariant(const CXXDestructorDecl * Dtor,CXXDtorType DT) const210 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211 CXXDtorType DT) const override {
212 // Itanium does not emit any destructor variant as an inline thunk.
213 // Delegating may occur as an optimization, but all variants are either
214 // emitted with external linkage or as linkonce if they are inline and used.
215 return false;
216 }
217
218 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219
220 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221 FunctionArgList &Params) override;
222
223 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224
225 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226 const CXXConstructorDecl *D,
227 CXXCtorType Type,
228 bool ForVirtualBase,
229 bool Delegating) override;
230
231 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232 const CXXDestructorDecl *DD,
233 CXXDtorType Type,
234 bool ForVirtualBase,
235 bool Delegating) override;
236
237 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238 CXXDtorType Type, bool ForVirtualBase,
239 bool Delegating, Address This,
240 QualType ThisTy) override;
241
242 void emitVTableDefinitions(CodeGenVTables &CGVT,
243 const CXXRecordDecl *RD) override;
244
245 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246 CodeGenFunction::VPtr Vptr) override;
247
doStructorsInitializeVPtrs(const CXXRecordDecl * VTableClass)248 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249 return true;
250 }
251
252 llvm::Constant *
253 getVTableAddressPoint(BaseSubobject Base,
254 const CXXRecordDecl *VTableClass) override;
255
256 llvm::Value *getVTableAddressPointInStructor(
257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259
260 llvm::Value *getVTableAddressPointInStructorWithVTT(
261 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263
264 llvm::Constant *
265 getVTableAddressPointForConstExpr(BaseSubobject Base,
266 const CXXRecordDecl *VTableClass) override;
267
268 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269 CharUnits VPtrOffset) override;
270
271 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272 Address This, llvm::Type *Ty,
273 SourceLocation Loc) override;
274
275 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276 const CXXDestructorDecl *Dtor,
277 CXXDtorType DtorType, Address This,
278 DeleteOrMemberCallExpr E) override;
279
280 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281
282 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284
setThunkLinkage(llvm::Function * Thunk,bool ForVTable,GlobalDecl GD,bool ReturnAdjustment)285 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286 bool ReturnAdjustment) override {
287 // Allow inlining of thunks by emitting them with available_externally
288 // linkage together with vtables when needed.
289 if (ForVTable && !Thunk->hasLocalLinkage())
290 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291 CGM.setGVProperties(Thunk, GD);
292 }
293
exportThunk()294 bool exportThunk() override { return true; }
295
296 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297 const ThisAdjustment &TA) override;
298
299 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300 const ReturnAdjustment &RA) override;
301
getSrcArgforCopyCtor(const CXXConstructorDecl *,FunctionArgList & Args) const302 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303 FunctionArgList &Args) const override {
304 assert(!Args.empty() && "expected the arglist to not be empty!");
305 return Args.size() - 1;
306 }
307
GetPureVirtualCallName()308 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
GetDeletedVirtualCallName()309 StringRef GetDeletedVirtualCallName() override
310 { return "__cxa_deleted_virtual"; }
311
312 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313 Address InitializeArrayCookie(CodeGenFunction &CGF,
314 Address NewPtr,
315 llvm::Value *NumElements,
316 const CXXNewExpr *expr,
317 QualType ElementType) override;
318 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319 Address allocPtr,
320 CharUnits cookieSize) override;
321
322 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323 llvm::GlobalVariable *DeclPtr,
324 bool PerformInit) override;
325 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326 llvm::FunctionCallee dtor,
327 llvm::Constant *addr) override;
328
329 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330 llvm::Value *Val);
331 void EmitThreadLocalInitFuncs(
332 CodeGenModule &CGM,
333 ArrayRef<const VarDecl *> CXXThreadLocals,
334 ArrayRef<llvm::Function *> CXXThreadLocalInits,
335 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336
337 /// Determine whether we will definitely emit this variable with a constant
338 /// initializer, either because the language semantics demand it or because
339 /// we know that the initializer is a constant.
340 // For weak definitions, any initializer available in the current translation
341 // is not necessarily reflective of the initializer used; such initializers
342 // are ignored unless if InspectInitForWeakDef is true.
343 bool
isEmittedWithConstantInitializer(const VarDecl * VD,bool InspectInitForWeakDef=false) const344 isEmittedWithConstantInitializer(const VarDecl *VD,
345 bool InspectInitForWeakDef = false) const {
346 VD = VD->getMostRecentDecl();
347 if (VD->hasAttr<ConstInitAttr>())
348 return true;
349
350 // All later checks examine the initializer specified on the variable. If
351 // the variable is weak, such examination would not be correct.
352 if (!InspectInitForWeakDef &&
353 (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
354 return false;
355
356 const VarDecl *InitDecl = VD->getInitializingDeclaration();
357 if (!InitDecl)
358 return false;
359
360 // If there's no initializer to run, this is constant initialization.
361 if (!InitDecl->hasInit())
362 return true;
363
364 // If we have the only definition, we don't need a thread wrapper if we
365 // will emit the value as a constant.
366 if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
367 return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();
368
369 // Otherwise, we need a thread wrapper unless we know that every
370 // translation unit will emit the value as a constant. We rely on the
371 // variable being constant-initialized in every translation unit if it's
372 // constant-initialized in any translation unit, which isn't actually
373 // guaranteed by the standard but is necessary for sanity.
374 return InitDecl->hasConstantInitialization();
375 }
376
usesThreadWrapperFunction(const VarDecl * VD) const377 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
378 return !isEmittedWithConstantInitializer(VD) ||
379 VD->needsDestruction(getContext());
380 }
381 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
382 QualType LValType) override;
383
384 bool NeedsVTTParameter(GlobalDecl GD) override;
385
386 /**************************** RTTI Uniqueness ******************************/
387
388 protected:
389 /// Returns true if the ABI requires RTTI type_info objects to be unique
390 /// across a program.
shouldRTTIBeUnique() const391 virtual bool shouldRTTIBeUnique() const { return true; }
392
393 public:
394 /// What sort of unique-RTTI behavior should we use?
395 enum RTTIUniquenessKind {
396 /// We are guaranteeing, or need to guarantee, that the RTTI string
397 /// is unique.
398 RUK_Unique,
399
400 /// We are not guaranteeing uniqueness for the RTTI string, so we
401 /// can demote to hidden visibility but must use string comparisons.
402 RUK_NonUniqueHidden,
403
404 /// We are not guaranteeing uniqueness for the RTTI string, so we
405 /// have to use string comparisons, but we also have to emit it with
406 /// non-hidden visibility.
407 RUK_NonUniqueVisible
408 };
409
410 /// Return the required visibility status for the given type and linkage in
411 /// the current ABI.
412 RTTIUniquenessKind
413 classifyRTTIUniqueness(QualType CanTy,
414 llvm::GlobalValue::LinkageTypes Linkage) const;
415 friend class ItaniumRTTIBuilder;
416
417 void emitCXXStructor(GlobalDecl GD) override;
418
419 std::pair<llvm::Value *, const CXXRecordDecl *>
420 LoadVTablePtr(CodeGenFunction &CGF, Address This,
421 const CXXRecordDecl *RD) override;
422
423 private:
hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl * RD) const424 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
425 const auto &VtableLayout =
426 CGM.getItaniumVTableContext().getVTableLayout(RD);
427
428 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
429 // Skip empty slot.
430 if (!VtableComponent.isUsedFunctionPointerKind())
431 continue;
432
433 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
434 if (!Method->getCanonicalDecl()->isInlined())
435 continue;
436
437 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
438 auto *Entry = CGM.GetGlobalValue(Name);
439 // This checks if virtual inline function has already been emitted.
440 // Note that it is possible that this inline function would be emitted
441 // after trying to emit vtable speculatively. Because of this we do
442 // an extra pass after emitting all deferred vtables to find and emit
443 // these vtables opportunistically.
444 if (!Entry || Entry->isDeclaration())
445 return true;
446 }
447 return false;
448 }
449
isVTableHidden(const CXXRecordDecl * RD) const450 bool isVTableHidden(const CXXRecordDecl *RD) const {
451 const auto &VtableLayout =
452 CGM.getItaniumVTableContext().getVTableLayout(RD);
453
454 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
455 if (VtableComponent.isRTTIKind()) {
456 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
457 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
458 return true;
459 } else if (VtableComponent.isUsedFunctionPointerKind()) {
460 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
461 if (Method->getVisibility() == Visibility::HiddenVisibility &&
462 !Method->isDefined())
463 return true;
464 }
465 }
466 return false;
467 }
468 };
469
470 class ARMCXXABI : public ItaniumCXXABI {
471 public:
ARMCXXABI(CodeGen::CodeGenModule & CGM)472 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
473 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
474 /*UseARMGuardVarABI=*/true) {}
475
HasThisReturn(GlobalDecl GD) const476 bool HasThisReturn(GlobalDecl GD) const override {
477 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
478 isa<CXXDestructorDecl>(GD.getDecl()) &&
479 GD.getDtorType() != Dtor_Deleting));
480 }
481
482 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
483 QualType ResTy) override;
484
485 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
486 Address InitializeArrayCookie(CodeGenFunction &CGF,
487 Address NewPtr,
488 llvm::Value *NumElements,
489 const CXXNewExpr *expr,
490 QualType ElementType) override;
491 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
492 CharUnits cookieSize) override;
493 };
494
495 class AppleARM64CXXABI : public ARMCXXABI {
496 public:
AppleARM64CXXABI(CodeGen::CodeGenModule & CGM)497 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
498 Use32BitVTableOffsetABI = true;
499 }
500
501 // ARM64 libraries are prepared for non-unique RTTI.
shouldRTTIBeUnique() const502 bool shouldRTTIBeUnique() const override { return false; }
503 };
504
505 class FuchsiaCXXABI final : public ItaniumCXXABI {
506 public:
FuchsiaCXXABI(CodeGen::CodeGenModule & CGM)507 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
508 : ItaniumCXXABI(CGM) {}
509
510 private:
HasThisReturn(GlobalDecl GD) const511 bool HasThisReturn(GlobalDecl GD) const override {
512 return isa<CXXConstructorDecl>(GD.getDecl()) ||
513 (isa<CXXDestructorDecl>(GD.getDecl()) &&
514 GD.getDtorType() != Dtor_Deleting);
515 }
516 };
517
518 class WebAssemblyCXXABI final : public ItaniumCXXABI {
519 public:
WebAssemblyCXXABI(CodeGen::CodeGenModule & CGM)520 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
521 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
522 /*UseARMGuardVarABI=*/true) {}
523 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
524 llvm::CallInst *
525 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
526 llvm::Value *Exn) override;
527
528 private:
HasThisReturn(GlobalDecl GD) const529 bool HasThisReturn(GlobalDecl GD) const override {
530 return isa<CXXConstructorDecl>(GD.getDecl()) ||
531 (isa<CXXDestructorDecl>(GD.getDecl()) &&
532 GD.getDtorType() != Dtor_Deleting);
533 }
canCallMismatchedFunctionType() const534 bool canCallMismatchedFunctionType() const override { return false; }
535 };
536
/// Itanium-based ABI variant for the XL environment (presumably IBM AIX —
/// TODO confirm): it overrides global-destructor registration and opts into
/// the sinit/sterm static init/termination mechanism.
class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  /// Register a destructor for a global; on this target this routes through
  /// the sterm-finalizer scheme rather than __cxa_atexit.
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  /// Use sinit/sterm functions for static initialization/termination.
  bool useSinitAndSterm() const override { return true; }

private:
  /// Emit the finalizer that invokes \p dtorStub for \p D (defined later in
  /// the file, outside this view).
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
552 }
553
CreateItaniumCXXABI(CodeGenModule & CGM)554 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
555 switch (CGM.getContext().getCXXABIKind()) {
556 // For IR-generation purposes, there's no significant difference
557 // between the ARM and iOS ABIs.
558 case TargetCXXABI::GenericARM:
559 case TargetCXXABI::iOS:
560 case TargetCXXABI::WatchOS:
561 return new ARMCXXABI(CGM);
562
563 case TargetCXXABI::AppleARM64:
564 return new AppleARM64CXXABI(CGM);
565
566 case TargetCXXABI::Fuchsia:
567 return new FuchsiaCXXABI(CGM);
568
569 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
570 // include the other 32-bit ARM oddities: constructor/destructor return values
571 // and array cookies.
572 case TargetCXXABI::GenericAArch64:
573 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
574 /*UseARMGuardVarABI=*/true);
575
576 case TargetCXXABI::GenericMIPS:
577 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
578
579 case TargetCXXABI::WebAssembly:
580 return new WebAssemblyCXXABI(CGM);
581
582 case TargetCXXABI::XL:
583 return new XLCXXABI(CGM);
584
585 case TargetCXXABI::GenericItanium:
586 if (CGM.getContext().getTargetInfo().getTriple().getArch()
587 == llvm::Triple::le32) {
588 // For PNaCl, use ARM-style method pointers so that PNaCl code
589 // does not assume anything about the alignment of function
590 // pointers.
591 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
592 }
593 return new ItaniumCXXABI(CGM);
594
595 case TargetCXXABI::Microsoft:
596 llvm_unreachable("Microsoft ABI is not Itanium-based");
597 }
598 llvm_unreachable("bad ABI kind");
599 }
600
601 llvm::Type *
ConvertMemberPointerType(const MemberPointerType * MPT)602 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
603 if (MPT->isMemberDataPointer())
604 return CGM.PtrDiffTy;
605 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
606 }
607
608 /// In the Itanium and ARM ABIs, method pointers have the form:
609 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
610 ///
611 /// In the Itanium ABI:
612 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
613 /// - the this-adjustment is (memptr.adj)
614 /// - the virtual offset is (memptr.ptr - 1)
615 ///
616 /// In the ARM ABI:
617 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
618 /// - the this-adjustment is (memptr.adj >> 1)
619 /// - the virtual offset is (memptr.ptr)
620 /// ARM uses 'adj' for the virtual flag because Thumb functions
621 /// may be only single-byte aligned.
622 ///
623 /// If the member is virtual, the adjusted 'this' pointer points
624 /// to a vtable pointer from which the virtual offset is applied.
625 ///
626 /// If the member is non-virtual, memptr.ptr is the address of
627 /// the function to call.
EmitLoadOfMemberFunctionPointer(CodeGenFunction & CGF,const Expr * E,Address ThisAddr,llvm::Value * & ThisPtrForCall,llvm::Value * MemFnPtr,const MemberPointerType * MPT)628 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
629 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
630 llvm::Value *&ThisPtrForCall,
631 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
632 CGBuilderTy &Builder = CGF.Builder;
633
634 const FunctionProtoType *FPT =
635 MPT->getPointeeType()->getAs<FunctionProtoType>();
636 auto *RD =
637 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
638
639 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
640 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
641
642 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
643
644 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
645 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
646 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
647
648 // Extract memptr.adj, which is in the second field.
649 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
650
651 // Compute the true adjustment.
652 llvm::Value *Adj = RawAdj;
653 if (UseARMMethodPtrABI)
654 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
655
656 // Apply the adjustment and cast back to the original struct type
657 // for consistency.
658 llvm::Value *This = ThisAddr.getPointer();
659 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
660 Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
661 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
662 ThisPtrForCall = This;
663
664 // Load the function pointer.
665 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
666
667 // If the LSB in the function pointer is 1, the function pointer points to
668 // a virtual function.
669 llvm::Value *IsVirtual;
670 if (UseARMMethodPtrABI)
671 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
672 else
673 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
674 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
675 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
676
677 // In the virtual path, the adjustment left 'This' pointing to the
678 // vtable of the correct base subobject. The "function pointer" is an
679 // offset within the vtable (+1 for the virtual flag on non-ARM).
680 CGF.EmitBlock(FnVirtual);
681
682 // Cast the adjusted this to a pointer to vtable pointer and load.
683 llvm::Type *VTableTy = Builder.getInt8PtrTy();
684 CharUnits VTablePtrAlign =
685 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
686 CGF.getPointerAlign());
687 llvm::Value *VTable =
688 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
689
690 // Apply the offset.
691 // On ARM64, to reserve extra space in virtual member function pointers,
692 // we only pay attention to the low 32 bits of the offset.
693 llvm::Value *VTableOffset = FnAsInt;
694 if (!UseARMMethodPtrABI)
695 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
696 if (Use32BitVTableOffsetABI) {
697 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
698 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
699 }
700
701 // Check the address of the function pointer if CFI on member function
702 // pointers is enabled.
703 llvm::Constant *CheckSourceLocation;
704 llvm::Constant *CheckTypeDesc;
705 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
706 CGM.HasHiddenLTOVisibility(RD);
707 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
708 CGM.HasHiddenLTOVisibility(RD);
709 bool ShouldEmitWPDInfo =
710 CGM.getCodeGenOpts().WholeProgramVTables &&
711 // Don't insert type tests if we are forcing public std visibility.
712 !CGM.HasLTOVisibilityPublicStd(RD);
713 llvm::Value *VirtualFn = nullptr;
714
715 {
716 CodeGenFunction::SanitizerScope SanScope(&CGF);
717 llvm::Value *TypeId = nullptr;
718 llvm::Value *CheckResult = nullptr;
719
720 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
721 // If doing CFI, VFE or WPD, we will need the metadata node to check
722 // against.
723 llvm::Metadata *MD =
724 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
725 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
726 }
727
728 if (ShouldEmitVFEInfo) {
729 llvm::Value *VFPAddr =
730 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
731
732 // If doing VFE, load from the vtable with a type.checked.load intrinsic
733 // call. Note that we use the GEP to calculate the address to load from
734 // and pass 0 as the offset to the intrinsic. This is because every
735 // vtable slot of the correct type is marked with matching metadata, and
736 // we know that the load must be from one of these slots.
737 llvm::Value *CheckedLoad = Builder.CreateCall(
738 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
739 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
740 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
741 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
742 VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
743 "memptr.virtualfn");
744 } else {
745 // When not doing VFE, emit a normal load, as it allows more
746 // optimisations than type.checked.load.
747 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
748 llvm::Value *VFPAddr =
749 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
750 CheckResult = Builder.CreateCall(
751 CGM.getIntrinsic(llvm::Intrinsic::type_test),
752 {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
753 }
754
755 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
756 VirtualFn = CGF.Builder.CreateCall(
757 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
758 {VTableOffset->getType()}),
759 {VTable, VTableOffset});
760 VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
761 } else {
762 llvm::Value *VFPAddr =
763 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
764 VFPAddr = CGF.Builder.CreateBitCast(
765 VFPAddr, FTy->getPointerTo()->getPointerTo());
766 VirtualFn = CGF.Builder.CreateAlignedLoad(
767 FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
768 "memptr.virtualfn");
769 }
770 }
771 assert(VirtualFn && "Virtual fuction pointer not created!");
772 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
773 CheckResult) &&
774 "Check result required but not created!");
775
776 if (ShouldEmitCFICheck) {
777 // If doing CFI, emit the check.
778 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
779 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
780 llvm::Constant *StaticData[] = {
781 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
782 CheckSourceLocation,
783 CheckTypeDesc,
784 };
785
786 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
787 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
788 } else {
789 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
790 CGM.getLLVMContext(),
791 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
792 llvm::Value *ValidVtable = Builder.CreateCall(
793 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
794 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
795 SanitizerHandler::CFICheckFail, StaticData,
796 {VTable, ValidVtable});
797 }
798
799 FnVirtual = Builder.GetInsertBlock();
800 }
801 } // End of sanitizer scope
802
803 CGF.EmitBranch(FnEnd);
804
805 // In the non-virtual path, the function pointer is actually a
806 // function pointer.
807 CGF.EmitBlock(FnNonVirtual);
808 llvm::Value *NonVirtualFn =
809 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
810
811 // Check the function pointer if CFI on member function pointers is enabled.
812 if (ShouldEmitCFICheck) {
813 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
814 if (RD->hasDefinition()) {
815 CodeGenFunction::SanitizerScope SanScope(&CGF);
816
817 llvm::Constant *StaticData[] = {
818 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
819 CheckSourceLocation,
820 CheckTypeDesc,
821 };
822
823 llvm::Value *Bit = Builder.getFalse();
824 llvm::Value *CastedNonVirtualFn =
825 Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
826 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
827 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
828 getContext().getMemberPointerType(
829 MPT->getPointeeType(),
830 getContext().getRecordType(Base).getTypePtr()));
831 llvm::Value *TypeId =
832 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
833
834 llvm::Value *TypeTest =
835 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
836 {CastedNonVirtualFn, TypeId});
837 Bit = Builder.CreateOr(Bit, TypeTest);
838 }
839
840 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
841 SanitizerHandler::CFICheckFail, StaticData,
842 {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
843
844 FnNonVirtual = Builder.GetInsertBlock();
845 }
846 }
847
848 // We're done.
849 CGF.EmitBlock(FnEnd);
850 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
851 CalleePtr->addIncoming(VirtualFn, FnVirtual);
852 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
853
854 CGCallee Callee(FPT, CalleePtr);
855 return Callee;
856 }
857
/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  // Itanium represents a data member pointer as a ptrdiff_t byte offset
  // from the base address of the containing object.
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Cast to char* so the GEP below performs byte-wise arithmetic.
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);

  // Apply the offset, which we assume is non-null.
  llvm::Value *Addr = Builder.CreateInBoundsGEP(
      Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
                          ->getPointerTo(Base.getAddressSpace());
  return Builder.CreateBitCast(Addr, PType);
}
880
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased;  and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  // A null adjustment constant means the conversion does not change the
  // representation at all.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check: the null data member pointer is -1 and must be preserved
    // unchanged across the conversion.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM, because the low bit
  // of 'adj' carries the virtual-function discriminator there.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Member function pointers: rewrite only the 'adj' field (index 1) of the
  // {ptr, adj} pair; the 'ptr' field is unaffected by these conversions.
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}
959
/// Constant-folding counterpart of the IRGen conversion above: performs a
/// bitcast, derived-to-base, or base-to-derived member pointer conversion
/// entirely at compile time.
llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null: the all-ones (-1) null representation is preserved.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM, where the low bit of
  // 'adj' is the virtual-function discriminator.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Member function pointers: rewrite the 'adj' field (index 1) of the
  // constant {ptr, adj} aggregate.
  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}
1007
1008 llvm::Constant *
EmitNullMemberPointer(const MemberPointerType * MPT)1009 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1010 // Itanium C++ ABI 2.3:
1011 // A NULL pointer is represented as -1.
1012 if (MPT->isMemberDataPointer())
1013 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
1014
1015 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1016 llvm::Constant *Values[2] = { Zero, Zero };
1017 return llvm::ConstantStruct::getAnon(Values);
1018 }
1019
1020 llvm::Constant *
EmitMemberDataPointer(const MemberPointerType * MPT,CharUnits offset)1021 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1022 CharUnits offset) {
1023 // Itanium C++ ABI 2.3:
1024 // A pointer to data member is an offset from the base address of
1025 // the class object containing it, represented as a ptrdiff_t
1026 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1027 }
1028
1029 llvm::Constant *
EmitMemberFunctionPointer(const CXXMethodDecl * MD)1030 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1031 return BuildMemberPointer(MD, CharUnits::Zero());
1032 }
1033
/// Build the {ptr, adj} constant pair for a pointer to member function,
/// encoding virtualness per the Itanium (or ARM) member pointer ABI.
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      // Classic layout: each vtable slot is one pointer wide.
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(0));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    // Non-virtual: ptr is the function address; adj is the this-adjustment
    // (doubled on ARM, whose low bit is the virtual discriminator).
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
1096
EmitMemberPointer(const APValue & MP,QualType MPType)1097 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1098 QualType MPType) {
1099 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1100 const ValueDecl *MPD = MP.getMemberPointerDecl();
1101 if (!MPD)
1102 return EmitNullMemberPointer(MPT);
1103
1104 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1105
1106 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1107 return BuildMemberPointer(MD, ThisAdjustment);
1108
1109 CharUnits FieldOffset =
1110 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1111 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1112 }
1113
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // For inequality the whole expression is negated, so by De Morgan's laws
  // the predicate and the two combining operators all flip.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1191
/// Emit a boolean test for whether the given member pointer is non-null.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  // For member data pointers, this is just a check against -1
  // (the Itanium null representation, since 0 is a valid offset).
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}
1225
classifyReturnType(CGFunctionInfo & FI) const1226 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1227 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1228 if (!RD)
1229 return false;
1230
1231 // If C++ prohibits us from making a copy, return by address.
1232 if (!RD->canPassInRegisters()) {
1233 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1234 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1235 return true;
1236 }
1237 return false;
1238 }
1239
1240 /// The Itanium ABI requires non-zero initialization only for data
1241 /// member pointers, for which '0' is a valid offset.
isZeroInitializable(const MemberPointerType * MPT)1242 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1243 return MPT->isMemberFunctionPointer();
1244 }
1245
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // The deleting destructor performs the deallocation itself, so when using
  // the global delete we only run the complete-object destructor here.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  // Pop (and run) the delete cleanup pushed above, pairing with the push.
  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1289
emitRethrow(CodeGenFunction & CGF,bool isNoReturn)1290 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1291 // void __cxa_rethrow();
1292
1293 llvm::FunctionType *FTy =
1294 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1295
1296 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1297
1298 if (isNoReturn)
1299 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1300 else
1301 CGF.EmitRuntimeCallOrInvoke(Fn);
1302 }
1303
getAllocateExceptionFn(CodeGenModule & CGM)1304 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1305 // void *__cxa_allocate_exception(size_t thrown_size);
1306
1307 llvm::FunctionType *FTy =
1308 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1309
1310 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1311 }
1312
getThrowFn(CodeGenModule & CGM)1313 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1314 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1315 // void (*dest) (void *));
1316
1317 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1318 llvm::FunctionType *FTy =
1319 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1320
1321 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1322 }
1323
/// Emit a 'throw' expression: allocate the exception object, construct the
/// thrown value into it, then call __cxa_throw with its type info and
/// (possibly null) destructor.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Construct the thrown value directly into the runtime-allocated buffer.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
1357
getItaniumDynamicCastFn(CodeGenFunction & CGF)1358 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1359 // void *__dynamic_cast(const void *sub,
1360 // const abi::__class_type_info *src,
1361 // const abi::__class_type_info *dst,
1362 // std::ptrdiff_t src2dst_offset);
1363
1364 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1365 llvm::Type *PtrDiffTy =
1366 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1367
1368 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1369
1370 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1371
1372 // Mark the function as nounwind readonly.
1373 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1374 llvm::Attribute::ReadOnly };
1375 llvm::AttributeList Attrs = llvm::AttributeList::get(
1376 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1377
1378 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1379 }
1380
getBadCastFn(CodeGenFunction & CGF)1381 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1382 // void __cxa_bad_cast();
1383 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1384 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1385 }
1386
/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]:
///   >= 0: the static byte offset of Src within Dst (unique public
///         non-virtual base);
///   -1:   no hint (a virtual base is involved);
///   -2:   Src is not a public base of Dst;
///   -3:   Src appears as a public base more than once, never virtually.
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      // Once a second public path is seen the offset will not be used,
      // so only the path count needs to be maintained.
      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
1438
getBadTypeidFn(CodeGenFunction & CGF)1439 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1440 // void __cxa_bad_typeid();
1441 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1442
1443 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1444 }
1445
shouldTypeidBeNullChecked(bool IsDeref,QualType SrcRecordTy)1446 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1447 QualType SrcRecordTy) {
1448 return IsDeref;
1449 }
1450
EmitBadTypeidCall(CodeGenFunction & CGF)1451 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1452 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1453 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1454 Call->setDoesNotReturn();
1455 CGF.Builder.CreateUnreachable();
1456 }
1457
/// Emit the polymorphic typeid lookup: load the std::type_info pointer
/// stored in the object's vtable (at slot -1 in the classic layout, or via
/// a 32-bit relative entry at offset -4 in the relative layout).
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
1484
shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,QualType SrcRecordTy)1485 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1486 QualType SrcRecordTy) {
1487 return SrcIsPtr;
1488 }
1489
/// Emit a call to the __dynamic_cast runtime function, passing RTTI for the
/// source and destination types plus the Itanium [2.9.7] src2dst offset
/// hint.  For reference casts, a null result branches to a bad_cast throw.
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    // EmitBadCastCall terminates the bad-cast block with unreachable.
    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1532
/// Emit dynamic_cast<void*>: read the offset-to-top entry from the vtable
/// (slot -2; a 32-bit entry in the relative layout, ptrdiff_t otherwise)
/// and apply it to obtain the complete-object pointer.
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}
1571
/// Emit the runtime bad-cast handler call for a failed dynamic_cast to a
/// reference type. The call is marked noreturn and the block is terminated
/// with 'unreachable'; returns true to signal that the block was terminated.
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadCastFn(CGF);
  // Use call-or-invoke so the runtime call participates in EH scopes.
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}
1579
/// Load the offset of a virtual base from the vtable of 'This'. The
/// vbase-offset slot lives at a statically known (usually negative) byte
/// offset from the vtable's address point.
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  // Byte offset of the vbase-offset slot relative to the address point.
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(
        CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
        "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores the offset as an i32 with 4-byte alignment.
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    // Classic layout stores the offset as a ptrdiff_t.
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
1609
/// Emit the Itanium constructor variants for D: always the base-object
/// constructor, and the complete-object constructor unless the class is
/// abstract (a complete object of an abstract class can never be created).
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}
1625
/// Add any ABI-required implicit parameter types to a structor's signature.
/// For Itanium this is only the VTT parameter (type void**), inserted right
/// after 'this' on base-object constructors/destructors of classes that
/// have virtual bases.
CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type void **).
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                             : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
    // Slot 0 is 'this'; the VTT becomes the new slot 1.
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(Context.VoidPtrTy));
    return AddedStructorArgCounts::prefix(1);
  }
  return AddedStructorArgCounts{};
}
1644
/// Emit the Itanium destructor variants for D: base and complete always,
/// plus the deleting destructor when the destructor is virtual (that is the
/// variant referenced from the vtable).
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // call the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}
1660
/// Add the implicit VTT parameter declaration (right after 'this') to the
/// function's parameter list when the current structor variant requires one,
/// and record it so the prologue can load it later.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT has type void** and is named "vtt" for IR readability.
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    // Insert after 'this' (slot 0).
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1680
/// Emit the per-instance-method prologue: bind 'this', load the VTT if this
/// structor takes one, and pre-store 'this' into the return slot for
/// this-returning functions.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
1707
getImplicitConstructorArgs(CodeGenFunction & CGF,const CXXConstructorDecl * D,CXXCtorType Type,bool ForVirtualBase,bool Delegating)1708 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1709 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1710 bool ForVirtualBase, bool Delegating) {
1711 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1712 return AddedStructorArgs{};
1713
1714 // Insert the implicit 'vtt' argument as the second argument.
1715 llvm::Value *VTT =
1716 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1717 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1718 return AddedStructorArgs::prefix({{VTT, VTTTy}});
1719 }
1720
/// Return the implicit parameter a destructor call must pass: for Itanium
/// this is the VTT (or null when the variant takes none).
llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
    bool ForVirtualBase, bool Delegating) {
  GlobalDecl GD(DD, Type);
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}
1727
/// Emit a direct (non-virtual-dispatch) destructor call, passing the VTT as
/// an implicit argument when required. Apple kext mode routes virtual
/// non-base destructors through its own vtable-call path instead.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  // May be null when this destructor variant takes no VTT.
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    // Kexts cannot devirtualize; emit an indirect call through the vtable.
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
                            nullptr);
}
1748
/// Emit the vtable (group) definition for RD: build the initializer, set
/// linkage/comdat/visibility, attach type metadata, and handle the special
/// __cxxabiv1::__fundamental_type_info case. No-op if already initialized.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // Relative-layout vtables that aren't dso_local get a local alias so
  // references within this DSO can be PC-relative.
  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}
1808
isVirtualOffsetNeededForVTableField(CodeGenFunction & CGF,CodeGenFunction::VPtr Vptr)1809 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1810 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1811 if (Vptr.NearestVBase == nullptr)
1812 return false;
1813 return NeedsVTTParameter(CGF.CurGD);
1814 }
1815
getVTableAddressPointInStructor(CodeGenFunction & CGF,const CXXRecordDecl * VTableClass,BaseSubobject Base,const CXXRecordDecl * NearestVBase)1816 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1817 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1818 const CXXRecordDecl *NearestVBase) {
1819
1820 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1821 NeedsVTTParameter(CGF.CurGD)) {
1822 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1823 NearestVBase);
1824 }
1825 return getVTableAddressPoint(Base, VTableClass);
1826 }
1827
/// Return the constant address point for Base's vtable within VTableClass's
/// vtable group, as an inbounds GEP into the vtable global.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // GEP indices: dereference the global, select the vtable in the group,
  // then the address point within it.
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // InRangeIndex=1 tells the optimizer the access stays within vtable
  // number VTableIndex of the group.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}
1849
1850 // Check whether all the non-inline virtual methods for the class have the
1851 // specified attribute.
1852 template <typename T>
CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl * RD)1853 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1854 bool FoundNonInlineVirtualMethodWithAttr = false;
1855 for (const auto *D : RD->noload_decls()) {
1856 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1857 if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1858 FD->doesThisDeclarationHaveABody())
1859 continue;
1860 if (!D->hasAttr<T>())
1861 return false;
1862 FoundNonInlineVirtualMethodWithAttr = true;
1863 }
1864 }
1865
1866 // We didn't find any non-inline virtual methods missing the attribute. We
1867 // will return true when we found at least one non-inline virtual with the
1868 // attribute. (This lets our caller know that the attribute needs to be
1869 // propagated up to the vtable.)
1870 return FoundNonInlineVirtualMethodWithAttr;
1871 }
1872
/// Load Base's vtable address point out of the VTT parameter of the current
/// structor, using the precomputed secondary-vpointer index.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  // Step to the relevant entry (index 0 needs no GEP).
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.VoidPtrTy, VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
                                       CGF.getPointerAlign());
}
1893
/// For constant-expression contexts the address point is simply the constant
/// one; no VTT indirection is ever needed.
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
}
1898
/// Get (creating and caching on first use) the global variable for RD's
/// vtable group. The result may still be a declaration; emission is deferred.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  // (Relative layout entries are i32, hence the 32-bit alignment.)
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // In MS C++ if you have a class with virtual functions in which you are using
  // selective member import/export, then all virtual functions must be exported
  // unless they are inline, otherwise a link error will result. To match this
  // behavior, for such classes, we dllimport the vtable if it is defined
  // externally and all the non-inline virtual methods are marked dllimport, and
  // we dllexport the vtable if it is defined in this TU and all the non-inline
  // virtual methods are marked dllexport.
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
      if (CGM.getVTables().isVTableExternal(RD)) {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      } else {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      }
    }
  }
  CGM.setGVProperties(VTable, RD);

  return VTable;
}
1952
/// Emit the load of a virtual function pointer from the object's vtable for
/// a virtual call, honoring CFI vtable checks, relative vtable layout, and
/// !invariant.load for strict vtable pointers.
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *TyPtr = Ty->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, TyPtr->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: combined type-check + load; byte offset of the slot is
    // index * pointer-size.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: slots are 4-byte relative offsets, resolved via the
      // llvm.load.relative intrinsic.
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
    } else {
      VTable =
          CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          TyPtr, VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
                                        CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
2010
EmitVirtualDestructorCall(CodeGenFunction & CGF,const CXXDestructorDecl * Dtor,CXXDtorType DtorType,Address This,DeleteOrMemberCallExpr E)2011 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2012 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2013 Address This, DeleteOrMemberCallExpr E) {
2014 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2015 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2016 assert((CE != nullptr) ^ (D != nullptr));
2017 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2018 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2019
2020 GlobalDecl GD(Dtor, DtorType);
2021 const CGFunctionInfo *FInfo =
2022 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2023 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2024 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2025
2026 QualType ThisTy;
2027 if (CE) {
2028 ThisTy = CE->getObjectType();
2029 } else {
2030 ThisTy = D->getDestroyedType();
2031 }
2032
2033 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2034 QualType(), nullptr);
2035 return nullptr;
2036 }
2037
emitVirtualInheritanceTables(const CXXRecordDecl * RD)2038 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2039 CodeGenVTables &VTables = CGM.getVTables();
2040 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2041 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2042 }
2043
/// Whether it is safe to speculatively emit an available_externally copy of
/// RD's vtable when RD is used as a base subobject.
bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
    const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an available_externally
  // copy of vtable.
  if (isVTableHidden(RD))
    return false;

  // -fforce-emit-vtables overrides the remaining safety checks.
  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // If we don't have any not emitted inline virtual function then we are safe
  // to emit an available_externally copy of vtable.
  // FIXME we can still emit a copy of the vtable if we
  // can emit definition of the inline functions.
  if (hasAnyUnusedVirtualInlineFunction(RD))
    return false;

  // For a class with virtual bases, we must also be able to speculatively
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
  // the vtable" and "can emit the VTT". For a base subobject, this means we
  // need to be able to emit non-virtual base vtables.
  if (RD->getNumVBases()) {
    for (const auto &B : RD->bases()) {
      auto *BRD = B.getType()->getAsCXXRecordDecl();
      assert(BRD && "no class for base specifier");
      // Virtual bases and non-dynamic bases contribute no base vtable here.
      if (B.isVirtual() || !BRD->isDynamicClass())
        continue;
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
        return false;
    }
  }

  return true;
}
2083
canSpeculativelyEmitVTable(const CXXRecordDecl * RD) const2084 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2085 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2086 return false;
2087
2088 // For a complete-object vtable (or more specifically, for the VTT), we need
2089 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2090 for (const auto &B : RD->vbases()) {
2091 auto *BRD = B.getType()->getAsCXXRecordDecl();
2092 assert(BRD && "no class for base specifier");
2093 if (!BRD->isDynamicClass())
2094 continue;
2095 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2096 return false;
2097 }
2098
2099 return true;
2100 }
/// Apply a thunk-style pointer adjustment consisting of an optional
/// non-virtual byte offset and an optional virtual offset loaded from the
/// vtable. For 'this' adjustments the non-virtual part is applied first;
/// for return adjustments it is applied after the virtual part.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // No adjustment at all: return the pointer unchanged.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  // Work in terms of i8* so offsets are byte offsets.
  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    // VirtualAdjustment is the byte offset of the adjustment slot relative
    // to the vtable's address point.
    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                        CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(
        V.getElementType(), V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
2161
/// Apply a 'this' adjustment for a thunk (non-virtual part first, then the
/// vcall-offset-based virtual part).
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}
2169
/// Apply a covariant return-value adjustment for a thunk (the
/// vbase-offset-based virtual part first, then the non-virtual part).
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}
2177
EmitReturnFromThunk(CodeGenFunction & CGF,RValue RV,QualType ResultType)2178 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2179 RValue RV, QualType ResultType) {
2180 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2181 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2182
2183 // Destructor thunks in the ARM ABI have indeterminate results.
2184 llvm::Type *T = CGF.ReturnValue.getElementType();
2185 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2186 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2187 }
2188
2189 /************************** Array allocation cookies **************************/
2190
getArrayCookieSizeImpl(QualType elementType)2191 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2192 // The array cookie is a size_t; pad that up to the element alignment.
2193 // The cookie is actually right-justified in that space.
2194 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2195 CGM.getContext().getPreferredTypeAlignInChars(elementType));
2196 }
2197
/// Write the Itanium array-new cookie (the element count, right-justified in
/// the cookie space) at NewPtr and return the adjusted pointer to the first
/// element. In ASan mode the cookie is additionally poisoned via the runtime.
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie: the count sits in the last size_t of
  // the cookie space.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan: poison the cookie so direct
  // user reads of it are flagged (only for AS 0 and replaceable/opted-in
  // operator new).
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
2243
/// Read the element count back out of an Itanium array cookie. In ASan mode
/// (address space 0) the read goes through a runtime helper that validates
/// the cookie's shadow first.
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
2269
getArrayCookieSizeImpl(QualType elementType)2270 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2271 // ARM says that the cookie is always:
2272 // struct array_cookie {
2273 // std::size_t element_size; // element_size != 0
2274 // std::size_t element_count;
2275 // };
2276 // But the base ABI doesn't give anything an alignment greater than
2277 // 8, so we can dismiss this as typical ABI-author blindness to
2278 // actual language complexity and round up to the element alignment.
2279 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2280 CGM.getContext().getTypeAlignInChars(elementType));
2281 }
2282
InitializeArrayCookie(CodeGenFunction & CGF,Address newPtr,llvm::Value * numElements,const CXXNewExpr * expr,QualType elementType)2283 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2284 Address newPtr,
2285 llvm::Value *numElements,
2286 const CXXNewExpr *expr,
2287 QualType elementType) {
2288 assert(requiresArrayCookie(expr));
2289
2290 // The cookie is always at the start of the buffer.
2291 Address cookie = newPtr;
2292
2293 // The first element is the element size.
2294 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2295 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2296 getContext().getTypeSizeInChars(elementType).getQuantity());
2297 CGF.Builder.CreateStore(elementSize, cookie);
2298
2299 // The second element is the element count.
2300 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2301 CGF.Builder.CreateStore(numElements, cookie);
2302
2303 // Finally, compute a pointer to the actual data buffer by skipping
2304 // over the cookie completely.
2305 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2306 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2307 }
2308
readArrayCookieImpl(CodeGenFunction & CGF,Address allocPtr,CharUnits cookieSize)2309 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2310 Address allocPtr,
2311 CharUnits cookieSize) {
2312 // The number of elements is at offset sizeof(size_t) relative to
2313 // the allocated pointer.
2314 Address numElementsPtr
2315 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2316
2317 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2318 return CGF.Builder.CreateLoad(numElementsPtr);
2319 }
2320
2321 /*********************** Static local initialization **************************/
2322
getGuardAcquireFn(CodeGenModule & CGM,llvm::PointerType * GuardPtrTy)2323 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2324 llvm::PointerType *GuardPtrTy) {
2325 // int __cxa_guard_acquire(__guard *guard_object);
2326 llvm::FunctionType *FTy =
2327 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2328 GuardPtrTy, /*isVarArg=*/false);
2329 return CGM.CreateRuntimeFunction(
2330 FTy, "__cxa_guard_acquire",
2331 llvm::AttributeList::get(CGM.getLLVMContext(),
2332 llvm::AttributeList::FunctionIndex,
2333 llvm::Attribute::NoUnwind));
2334 }
2335
getGuardReleaseFn(CodeGenModule & CGM,llvm::PointerType * GuardPtrTy)2336 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2337 llvm::PointerType *GuardPtrTy) {
2338 // void __cxa_guard_release(__guard *guard_object);
2339 llvm::FunctionType *FTy =
2340 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2341 return CGM.CreateRuntimeFunction(
2342 FTy, "__cxa_guard_release",
2343 llvm::AttributeList::get(CGM.getLLVMContext(),
2344 llvm::AttributeList::FunctionIndex,
2345 llvm::Attribute::NoUnwind));
2346 }
2347
getGuardAbortFn(CodeGenModule & CGM,llvm::PointerType * GuardPtrTy)2348 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2349 llvm::PointerType *GuardPtrTy) {
2350 // void __cxa_guard_abort(__guard *guard_object);
2351 llvm::FunctionType *FTy =
2352 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2353 return CGM.CreateRuntimeFunction(
2354 FTy, "__cxa_guard_abort",
2355 llvm::AttributeList::get(CGM.getLLVMContext(),
2356 llvm::AttributeList::FunctionIndex,
2357 llvm::Attribute::NoUnwind));
2358 }
2359
namespace {
  /// EH cleanup that calls __cxa_guard_abort on the given guard variable.
  /// Pushed while the guarded initializer runs so that a throwing
  /// initializer releases the guard (per Itanium ABI 3.3.2) before the
  /// exception propagates.
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;  // The guard variable to abort.
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // __cxa_guard_abort is declared nounwind, so this call cannot throw.
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
} // namespace
2371
/// Emit a thread-safe (or, when permitted, unsafe) guarded initialization
/// for the static local or inline variable \p D backed by \p var, following
/// Itanium C++ ABI 3.3.2. The ARM code here follows the Itanium code
/// closely enough that we just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
          CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  // The guard lives in the default globals address space of the target.
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
      // An inline variable's guard function is run from the per-TU
      // initialization function, not via a dedicated global ctor function, so
      // we can't put it in a comdat.
      if (!NonTemplateInline)
        CGF.CurFn->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    // Cache the guard so a re-emission of this function body reuses it.
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    // __cxa_guard_acquire returns nonzero iff we should run the initializer.
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Store 1 into the first byte of the guard variable after initialization is
    // complete.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}
2563
2564 /// Register a global destructor using __cxa_atexit.
emitGlobalDtorWithCXAAtExit(CodeGenFunction & CGF,llvm::FunctionCallee dtor,llvm::Constant * addr,bool TLS)2565 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2566 llvm::FunctionCallee dtor,
2567 llvm::Constant *addr, bool TLS) {
2568 assert(!CGF.getTarget().getTriple().isOSAIX() &&
2569 "unexpected call to emitGlobalDtorWithCXAAtExit");
2570 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2571 "__cxa_atexit is disabled");
2572 const char *Name = "__cxa_atexit";
2573 if (TLS) {
2574 const llvm::Triple &T = CGF.getTarget().getTriple();
2575 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2576 }
2577
2578 // We're assuming that the destructor function is something we can
2579 // reasonably call with the default CC. Go ahead and cast it to the
2580 // right prototype.
2581 llvm::Type *dtorTy =
2582 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2583
2584 // Preserve address space of addr.
2585 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2586 auto AddrInt8PtrTy =
2587 AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2588
2589 // Create a variable that binds the atexit to this shared object.
2590 llvm::Constant *handle =
2591 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2592 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2593 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2594
2595 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2596 llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2597 llvm::FunctionType *atexitTy =
2598 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2599
2600 // Fetch the actual function.
2601 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2602 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2603 fn->setDoesNotThrow();
2604
2605 if (!addr)
2606 // addr is null when we are trying to register a dtor annotated with
2607 // __attribute__((destructor)) in a constructor function. Using null here is
2608 // okay because this argument is just passed back to the destructor
2609 // function.
2610 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2611
2612 llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2613 cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2614 llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2615 handle};
2616 CGF.EmitNounwindRuntimeCall(atexit, args);
2617 }
2618
createGlobalInitOrCleanupFn(CodeGen::CodeGenModule & CGM,StringRef FnName)2619 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2620 StringRef FnName) {
2621 // Create a function that registers/unregisters destructors that have the same
2622 // priority.
2623 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2624 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2625 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2626
2627 return GlobalInitOrCleanupFn;
2628 }
2629
/// For each priority level, build a "__GLOBAL_cleanup_<prio>" function that
/// calls unatexit() on every destructor registered at that priority and,
/// when unatexit reports the destructor had not already run (returns 0),
/// invokes it directly. Each cleanup function is then registered as a
/// global dtor at its priority.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    // The cleanup function has no real source location.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
    llvm::Type *dtorTy = dtorFuncTy->getPointerTo();

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.  Go ahead and cast it to the
      // right prototype.
      llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      // The last iteration's join block is "destruct.end"; earlier ones flow
      // into the next unatexit call.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2688
/// For each priority level, build a "__GLOBAL_init_<prio>" constructor
/// function that registers the destructors of that priority with
/// __cxa_atexit (or plain atexit when __cxa_atexit is disabled), then add
/// that function as a global ctor. On targets using sinit/sterm (AIX),
/// also emit the matching unatexit-based cleanup functions.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    // The init function has no real source location.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // Get the destructor function type, void(*)(void).
        llvm::Type *dtorTy =
            llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();

        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.  Go ahead and cast it to the
        // right prototype.
        CGF.registerGlobalDtorWithAtExit(
            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
  }

  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2734
2735 /// Register a global destructor as best as we know how.
registerGlobalDtor(CodeGenFunction & CGF,const VarDecl & D,llvm::FunctionCallee dtor,llvm::Constant * addr)2736 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2737 llvm::FunctionCallee dtor,
2738 llvm::Constant *addr) {
2739 if (D.isNoDestroy(CGM.getContext()))
2740 return;
2741
2742 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2743 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2744 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2745 // We can always use __cxa_thread_atexit.
2746 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2747 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2748
2749 // In Apple kexts, we want to add a global destructor entry.
2750 // FIXME: shouldn't this be guarded by some variable?
2751 if (CGM.getLangOpts().AppleKext) {
2752 // Generate a global destructor entry.
2753 return CGM.AddCXXDtorEntry(dtor, addr);
2754 }
2755
2756 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2757 }
2758
isThreadWrapperReplaceable(const VarDecl * VD,CodeGen::CodeGenModule & CGM)2759 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2760 CodeGen::CodeGenModule &CGM) {
2761 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2762 // Darwin prefers to have references to thread local variables to go through
2763 // the thread wrapper instead of directly referencing the backing variable.
2764 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2765 CGM.getTarget().getTriple().isOSDarwin();
2766 }
2767
2768 /// Get the appropriate linkage for the wrapper function. This is essentially
2769 /// the weak form of the variable's linkage; every translation unit which needs
2770 /// the wrapper emits a copy, and we want the linker to merge them.
2771 static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl * VD,CodeGen::CodeGenModule & CGM)2772 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2773 llvm::GlobalValue::LinkageTypes VarLinkage =
2774 CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2775
2776 // For internal linkage variables, we don't need an external or weak wrapper.
2777 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2778 return VarLinkage;
2779
2780 // If the thread wrapper is replaceable, give it appropriate linkage.
2781 if (isThreadWrapperReplaceable(VD, CGM))
2782 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2783 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2784 return VarLinkage;
2785 return llvm::GlobalValue::WeakODRLinkage;
2786 }
2787
2788 llvm::Function *
getOrCreateThreadLocalWrapper(const VarDecl * VD,llvm::Value * Val)2789 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2790 llvm::Value *Val) {
2791 // Mangle the name for the thread_local wrapper function.
2792 SmallString<256> WrapperName;
2793 {
2794 llvm::raw_svector_ostream Out(WrapperName);
2795 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2796 }
2797
2798 // FIXME: If VD is a definition, we should regenerate the function attributes
2799 // before returning.
2800 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2801 return cast<llvm::Function>(V);
2802
2803 QualType RetQT = VD->getType();
2804 if (RetQT->isReferenceType())
2805 RetQT = RetQT.getNonReferenceType();
2806
2807 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2808 getContext().getPointerType(RetQT), FunctionArgList());
2809
2810 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2811 llvm::Function *Wrapper =
2812 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2813 WrapperName.str(), &CGM.getModule());
2814
2815 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2816 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2817
2818 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
2819
2820 // Always resolve references to the wrapper at link time.
2821 if (!Wrapper->hasLocalLinkage())
2822 if (!isThreadWrapperReplaceable(VD, CGM) ||
2823 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2824 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2825 VD->getVisibility() == HiddenVisibility)
2826 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2827
2828 if (isThreadWrapperReplaceable(VD, CGM)) {
2829 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2830 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2831 }
2832
2833 ThreadWrappers.push_back({VD, Wrapper});
2834 return Wrapper;
2835 }
2836
/// Emit the __tls_init function, thread wrapper bodies, and per-variable
/// init functions/aliases for all thread_local variables in this TU.
/// Ordered initializers go through a guarded __tls_init; unordered
/// (template-instantiated) ones get their own per-variable init function.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    // One-byte thread-local guard so __tls_init runs at most once per thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper.  This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any).  Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered initializers use their per-variable init function instead
      // of the shared __tls_init.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols.  However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !VD->needsDestruction(getContext())) {
      // Init should be null.  If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns.
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Emit the wrapper body: run the init (if needed), then return the
    // variable's address (or, for references, the referenced object).
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized.  Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence.  This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}
3040
EmitThreadLocalVarDeclLValue(CodeGenFunction & CGF,const VarDecl * VD,QualType LValType)3041 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3042 const VarDecl *VD,
3043 QualType LValType) {
3044 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3045 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3046
3047 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3048 CallVal->setCallingConv(Wrapper->getCallingConv());
3049
3050 LValue LV;
3051 if (VD->getType()->isReferenceType())
3052 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3053 else
3054 LV = CGF.MakeAddrLValue(CallVal, LValType,
3055 CGF.getContext().getDeclAlign(VD));
3056 // FIXME: need setObjCGCLValueClass?
3057 return LV;
3058 }
3059
3060 /// Return whether the given global decl needs a VTT parameter, which it does
3061 /// if it's a base constructor or destructor with virtual bases.
NeedsVTTParameter(GlobalDecl GD)3062 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3063 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3064
3065 // We don't have any virtual bases, just return early.
3066 if (!MD->getParent()->getNumVBases())
3067 return false;
3068
3069 // Check if we have a base constructor.
3070 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3071 return true;
3072
3073 // Check if we have a base destructor.
3074 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3075 return true;
3076
3077 return false;
3078 }
3079
namespace {
/// ItaniumRTTIBuilder - Assembles the fields of an Itanium C++ ABI RTTI
/// descriptor (an abi::*_type_info object) for a single type and emits it as
/// a module-level global variable.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // The C++ ABI implementation being targeted.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
} // namespace
3180
GetAddrOfTypeName(QualType Ty,llvm::GlobalVariable::LinkageTypes Linkage)3181 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3182 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3183 SmallString<256> Name;
3184 llvm::raw_svector_ostream Out(Name);
3185 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3186
3187 // We know that the mangled name of the type starts at index 4 of the
3188 // mangled name of the typename, so we can just index into it in order to
3189 // get the mangled name of the type.
3190 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3191 Name.substr(4));
3192 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3193
3194 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3195 Name, Init->getType(), Linkage, Align.getQuantity());
3196
3197 GV->setInitializer(Init);
3198
3199 return GV;
3200 }
3201
3202 llvm::Constant *
GetAddrOfExternalRTTIDescriptor(QualType Ty)3203 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3204 // Mangle the RTTI name.
3205 SmallString<256> Name;
3206 llvm::raw_svector_ostream Out(Name);
3207 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3208
3209 // Look for an existing global.
3210 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3211
3212 if (!GV) {
3213 // Create a new global variable.
3214 // Note for the future: If we would ever like to do deferred emission of
3215 // RTTI, check if emitting vtables opportunistically need any adjustment.
3216
3217 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3218 /*isConstant=*/true,
3219 llvm::GlobalValue::ExternalLinkage, nullptr,
3220 Name);
3221 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3222 CGM.setGVProperties(GV, RD);
3223 // Import the typeinfo symbol when all non-inline virtual methods are
3224 // imported.
3225 if (CGM.getTarget().hasPS4DLLImportExport()) {
3226 if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3227 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3228 CGM.setDSOLocal(GV);
3229 }
3230 }
3231 }
3232
3233 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3234 }
3235
3236 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3237 /// info for that type is defined in the standard library.
TypeInfoIsInStandardLibrary(const BuiltinType * Ty)3238 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3239 // Itanium C++ ABI 2.9.2:
3240 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3241 // the run-time support library. Specifically, the run-time support
3242 // library should contain type_info objects for the types X, X* and
3243 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3244 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3245 // long, unsigned long, long long, unsigned long long, float, double,
3246 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3247 // half-precision floating point types.
3248 //
3249 // GCC also emits RTTI for __int128.
3250 // FIXME: We do not emit RTTI information for decimal types here.
3251
3252 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3253 switch (Ty->getKind()) {
3254 case BuiltinType::Void:
3255 case BuiltinType::NullPtr:
3256 case BuiltinType::Bool:
3257 case BuiltinType::WChar_S:
3258 case BuiltinType::WChar_U:
3259 case BuiltinType::Char_U:
3260 case BuiltinType::Char_S:
3261 case BuiltinType::UChar:
3262 case BuiltinType::SChar:
3263 case BuiltinType::Short:
3264 case BuiltinType::UShort:
3265 case BuiltinType::Int:
3266 case BuiltinType::UInt:
3267 case BuiltinType::Long:
3268 case BuiltinType::ULong:
3269 case BuiltinType::LongLong:
3270 case BuiltinType::ULongLong:
3271 case BuiltinType::Half:
3272 case BuiltinType::Float:
3273 case BuiltinType::Double:
3274 case BuiltinType::LongDouble:
3275 case BuiltinType::Float16:
3276 case BuiltinType::Float128:
3277 case BuiltinType::Char8:
3278 case BuiltinType::Char16:
3279 case BuiltinType::Char32:
3280 case BuiltinType::Int128:
3281 case BuiltinType::UInt128:
3282 return true;
3283
3284 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3285 case BuiltinType::Id:
3286 #include "clang/Basic/OpenCLImageTypes.def"
3287 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3288 case BuiltinType::Id:
3289 #include "clang/Basic/OpenCLExtensionTypes.def"
3290 case BuiltinType::OCLSampler:
3291 case BuiltinType::OCLEvent:
3292 case BuiltinType::OCLClkEvent:
3293 case BuiltinType::OCLQueue:
3294 case BuiltinType::OCLReserveID:
3295 #define SVE_TYPE(Name, Id, SingletonId) \
3296 case BuiltinType::Id:
3297 #include "clang/Basic/AArch64SVEACLETypes.def"
3298 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3299 case BuiltinType::Id:
3300 #include "clang/Basic/PPCTypes.def"
3301 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3302 #include "clang/Basic/RISCVVTypes.def"
3303 case BuiltinType::ShortAccum:
3304 case BuiltinType::Accum:
3305 case BuiltinType::LongAccum:
3306 case BuiltinType::UShortAccum:
3307 case BuiltinType::UAccum:
3308 case BuiltinType::ULongAccum:
3309 case BuiltinType::ShortFract:
3310 case BuiltinType::Fract:
3311 case BuiltinType::LongFract:
3312 case BuiltinType::UShortFract:
3313 case BuiltinType::UFract:
3314 case BuiltinType::ULongFract:
3315 case BuiltinType::SatShortAccum:
3316 case BuiltinType::SatAccum:
3317 case BuiltinType::SatLongAccum:
3318 case BuiltinType::SatUShortAccum:
3319 case BuiltinType::SatUAccum:
3320 case BuiltinType::SatULongAccum:
3321 case BuiltinType::SatShortFract:
3322 case BuiltinType::SatFract:
3323 case BuiltinType::SatLongFract:
3324 case BuiltinType::SatUShortFract:
3325 case BuiltinType::SatUFract:
3326 case BuiltinType::SatULongFract:
3327 case BuiltinType::BFloat16:
3328 return false;
3329
3330 case BuiltinType::Dependent:
3331 #define BUILTIN_TYPE(Id, SingletonId)
3332 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3333 case BuiltinType::Id:
3334 #include "clang/AST/BuiltinTypes.def"
3335 llvm_unreachable("asking for RRTI for a placeholder type!");
3336
3337 case BuiltinType::ObjCId:
3338 case BuiltinType::ObjCClass:
3339 case BuiltinType::ObjCSel:
3340 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3341 }
3342
3343 llvm_unreachable("Invalid BuiltinType Kind!");
3344 }
3345
TypeInfoIsInStandardLibrary(const PointerType * PointerTy)3346 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3347 QualType PointeeTy = PointerTy->getPointeeType();
3348 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3349 if (!BuiltinTy)
3350 return false;
3351
3352 // Check the qualifiers.
3353 Qualifiers Quals = PointeeTy.getQualifiers();
3354 Quals.removeConst();
3355
3356 if (!Quals.empty())
3357 return false;
3358
3359 return TypeInfoIsInStandardLibrary(BuiltinTy);
3360 }
3361
3362 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3363 /// information for the given type exists in the standard library.
IsStandardLibraryRTTIDescriptor(QualType Ty)3364 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3365 // Type info for builtin types is defined in the standard library.
3366 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3367 return TypeInfoIsInStandardLibrary(BuiltinTy);
3368
3369 // Type info for some pointer types to builtin types is defined in the
3370 // standard library.
3371 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3372 return TypeInfoIsInStandardLibrary(PointerTy);
3373
3374 return false;
3375 }
3376
3377 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3378 /// the given type exists somewhere else, and that we should not emit the type
3379 /// information in this translation unit. Assumes that it is not a
3380 /// standard-library type.
ShouldUseExternalRTTIDescriptor(CodeGenModule & CGM,QualType Ty)3381 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3382 QualType Ty) {
3383 ASTContext &Context = CGM.getContext();
3384
3385 // If RTTI is disabled, assume it might be disabled in the
3386 // translation unit that defines any potential key function, too.
3387 if (!Context.getLangOpts().RTTI) return false;
3388
3389 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3390 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3391 if (!RD->hasDefinition())
3392 return false;
3393
3394 if (!RD->isDynamicClass())
3395 return false;
3396
3397 // FIXME: this may need to be reconsidered if the key function
3398 // changes.
3399 // N.B. We must always emit the RTTI data ourselves if there exists a key
3400 // function.
3401 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3402
3403 // Don't import the RTTI but emit it locally.
3404 if (CGM.getTriple().isWindowsGNUEnvironment())
3405 return false;
3406
3407 if (CGM.getVTables().isVTableExternal(RD)) {
3408 if (CGM.getTarget().hasPS4DLLImportExport())
3409 return true;
3410
3411 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3412 ? false
3413 : true;
3414 }
3415 if (IsDLLImport)
3416 return true;
3417 }
3418
3419 return false;
3420 }
3421
3422 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
IsIncompleteClassType(const RecordType * RecordTy)3423 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3424 return !RecordTy->getDecl()->isCompleteDefinition();
3425 }
3426
3427 /// ContainsIncompleteClassType - Returns whether the given type contains an
3428 /// incomplete class type. This is true if
3429 ///
3430 /// * The given type is an incomplete class type.
3431 /// * The given type is a pointer type whose pointee type contains an
3432 /// incomplete class type.
3433 /// * The given type is a member pointer type whose class is an incomplete
3434 /// class type.
3435 /// * The given type is a member pointer type whoise pointee type contains an
3436 /// incomplete class type.
3437 /// is an indirect or direct pointer to an incomplete class type.
ContainsIncompleteClassType(QualType Ty)3438 static bool ContainsIncompleteClassType(QualType Ty) {
3439 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3440 if (IsIncompleteClassType(RecordTy))
3441 return true;
3442 }
3443
3444 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3445 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3446
3447 if (const MemberPointerType *MemberPointerTy =
3448 dyn_cast<MemberPointerType>(Ty)) {
3449 // Check if the class type is incomplete.
3450 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3451 if (IsIncompleteClassType(ClassType))
3452 return true;
3453
3454 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3455 }
3456
3457 return false;
3458 }
3459
3460 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3461 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3462 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
CanUseSingleInheritance(const CXXRecordDecl * RD)3463 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3464 // Check the number of bases.
3465 if (RD->getNumBases() != 1)
3466 return false;
3467
3468 // Get the base.
3469 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3470
3471 // Check that the base is not virtual.
3472 if (Base->isVirtual())
3473 return false;
3474
3475 // Check that the base is public.
3476 if (Base->getAccessSpecifier() != AS_public)
3477 return false;
3478
3479 // Check that the class is dynamic iff the base is.
3480 auto *BaseDecl =
3481 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3482 if (!BaseDecl->isEmpty() &&
3483 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3484 return false;
3485
3486 return true;
3487 }
3488
/// BuildVTablePointer - Append the vtable-pointer field of the type_info
/// object for \p Ty to Fields: the address point of the vtable of the
/// matching abi::*_type_info class.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  // Select which abi::*_type_info vtable this descriptor points to, based on
  // the type class.
  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  case Type::ExtInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Incomplete classes and classes without bases use the plain
    // __class_type_info; otherwise pick __si_ or __vmi_ based on the
    // inheritance shape.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    LLVM_FALLTHROUGH;

  case Type::ObjCInterface:
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable)
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
                                                          Two);
  }
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}
3632
3633 /// Return the linkage that the type info and type info name constants
3634 /// should have for the given type.
getTypeInfoLinkage(CodeGenModule & CGM,QualType Ty)3635 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3636 QualType Ty) {
3637 // Itanium C++ ABI 2.9.5p7:
3638 // In addition, it and all of the intermediate abi::__pointer_type_info
3639 // structs in the chain down to the abi::__class_type_info for the
3640 // incomplete class type must be prevented from resolving to the
3641 // corresponding type_info structs for the complete class type, possibly
3642 // by making them local static objects. Finally, a dummy class RTTI is
3643 // generated for the incomplete type that will not resolve to the final
3644 // complete class RTTI (because the latter need not exist), possibly by
3645 // making it a local static object.
3646 if (ContainsIncompleteClassType(Ty))
3647 return llvm::GlobalValue::InternalLinkage;
3648
3649 switch (Ty->getLinkage()) {
3650 case NoLinkage:
3651 case InternalLinkage:
3652 case UniqueExternalLinkage:
3653 return llvm::GlobalValue::InternalLinkage;
3654
3655 case VisibleNoLinkage:
3656 case ModuleInternalLinkage:
3657 case ModuleLinkage:
3658 case ExternalLinkage:
3659 // RTTI is not enabled, which means that this type info struct is going
3660 // to be used for exception handling. Give it linkonce_odr linkage.
3661 if (!CGM.getLangOpts().RTTI)
3662 return llvm::GlobalValue::LinkOnceODRLinkage;
3663
3664 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3665 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3666 if (RD->hasAttr<WeakAttr>())
3667 return llvm::GlobalValue::WeakODRLinkage;
3668 if (CGM.getTriple().isWindowsItaniumEnvironment())
3669 if (RD->hasAttr<DLLImportAttr>() &&
3670 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3671 return llvm::GlobalValue::ExternalLinkage;
3672 // MinGW always uses LinkOnceODRLinkage for type info.
3673 if (RD->isDynamicClass() &&
3674 !CGM.getContext()
3675 .getTargetInfo()
3676 .getTriple()
3677 .isWindowsGNUEnvironment())
3678 return CGM.getVTableLinkage(RD);
3679 }
3680
3681 return llvm::GlobalValue::LinkOnceODRLinkage;
3682 }
3683
3684 llvm_unreachable("Invalid linkage!");
3685 }
3686
BuildTypeInfo(QualType Ty)3687 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3688 // We want to operate on the canonical type.
3689 Ty = Ty.getCanonicalType();
3690
3691 // Check if we've already emitted an RTTI descriptor for this type.
3692 SmallString<256> Name;
3693 llvm::raw_svector_ostream Out(Name);
3694 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3695
3696 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3697 if (OldGV && !OldGV->isDeclaration()) {
3698 assert(!OldGV->hasAvailableExternallyLinkage() &&
3699 "available_externally typeinfos not yet implemented");
3700
3701 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3702 }
3703
3704 // Check if there is already an external RTTI descriptor for this type.
3705 if (IsStandardLibraryRTTIDescriptor(Ty) ||
3706 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3707 return GetAddrOfExternalRTTIDescriptor(Ty);
3708
3709 // Emit the standard library with external linkage.
3710 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3711
3712 // Give the type_info object and name the formal visibility of the
3713 // type itself.
3714 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3715 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3716 // If the linkage is local, only default visibility makes sense.
3717 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3718 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3719 ItaniumCXXABI::RUK_NonUniqueHidden)
3720 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3721 else
3722 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3723
3724 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3725 llvm::GlobalValue::DefaultStorageClass;
3726 if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3727 auto RD = Ty->getAsCXXRecordDecl();
3728 if (RD && RD->hasAttr<DLLExportAttr>())
3729 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3730 }
3731
3732 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3733 }
3734
BuildTypeInfo(QualType Ty,llvm::GlobalVariable::LinkageTypes Linkage,llvm::GlobalValue::VisibilityTypes Visibility,llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass)3735 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3736 QualType Ty,
3737 llvm::GlobalVariable::LinkageTypes Linkage,
3738 llvm::GlobalValue::VisibilityTypes Visibility,
3739 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3740 // Add the vtable pointer.
3741 BuildVTablePointer(cast<Type>(Ty));
3742
3743 // And the name.
3744 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3745 llvm::Constant *TypeNameField;
3746
3747 // If we're supposed to demote the visibility, be sure to set a flag
3748 // to use a string comparison for type_info comparisons.
3749 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3750 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3751 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3752 // The flag is the sign bit, which on ARM64 is defined to be clear
3753 // for global pointers. This is very ARM64-specific.
3754 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3755 llvm::Constant *flag =
3756 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3757 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3758 TypeNameField =
3759 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3760 } else {
3761 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3762 }
3763 Fields.push_back(TypeNameField);
3764
3765 switch (Ty->getTypeClass()) {
3766 #define TYPE(Class, Base)
3767 #define ABSTRACT_TYPE(Class, Base)
3768 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3769 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3770 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3771 #include "clang/AST/TypeNodes.inc"
3772 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3773
3774 // GCC treats vector types as fundamental types.
3775 case Type::Builtin:
3776 case Type::Vector:
3777 case Type::ExtVector:
3778 case Type::ConstantMatrix:
3779 case Type::Complex:
3780 case Type::BlockPointer:
3781 // Itanium C++ ABI 2.9.5p4:
3782 // abi::__fundamental_type_info adds no data members to std::type_info.
3783 break;
3784
3785 case Type::LValueReference:
3786 case Type::RValueReference:
3787 llvm_unreachable("References shouldn't get here");
3788
3789 case Type::Auto:
3790 case Type::DeducedTemplateSpecialization:
3791 llvm_unreachable("Undeduced type shouldn't get here");
3792
3793 case Type::Pipe:
3794 break;
3795
3796 case Type::ExtInt:
3797 break;
3798
3799 case Type::ConstantArray:
3800 case Type::IncompleteArray:
3801 case Type::VariableArray:
3802 // Itanium C++ ABI 2.9.5p5:
3803 // abi::__array_type_info adds no data members to std::type_info.
3804 break;
3805
3806 case Type::FunctionNoProto:
3807 case Type::FunctionProto:
3808 // Itanium C++ ABI 2.9.5p5:
3809 // abi::__function_type_info adds no data members to std::type_info.
3810 break;
3811
3812 case Type::Enum:
3813 // Itanium C++ ABI 2.9.5p5:
3814 // abi::__enum_type_info adds no data members to std::type_info.
3815 break;
3816
3817 case Type::Record: {
3818 const CXXRecordDecl *RD =
3819 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3820 if (!RD->hasDefinition() || !RD->getNumBases()) {
3821 // We don't need to emit any fields.
3822 break;
3823 }
3824
3825 if (CanUseSingleInheritance(RD))
3826 BuildSIClassTypeInfo(RD);
3827 else
3828 BuildVMIClassTypeInfo(RD);
3829
3830 break;
3831 }
3832
3833 case Type::ObjCObject:
3834 case Type::ObjCInterface:
3835 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3836 break;
3837
3838 case Type::ObjCObjectPointer:
3839 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3840 break;
3841
3842 case Type::Pointer:
3843 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3844 break;
3845
3846 case Type::MemberPointer:
3847 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3848 break;
3849
3850 case Type::Atomic:
3851 // No fields, at least for the moment.
3852 break;
3853 }
3854
3855 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3856
3857 SmallString<256> Name;
3858 llvm::raw_svector_ostream Out(Name);
3859 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3860 llvm::Module &M = CGM.getModule();
3861 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3862 llvm::GlobalVariable *GV =
3863 new llvm::GlobalVariable(M, Init->getType(),
3864 /*isConstant=*/true, Linkage, Init, Name);
3865
3866 // Export the typeinfo in the same circumstances as the vtable is exported.
3867 auto GVDLLStorageClass = DLLStorageClass;
3868 if (CGM.getTarget().hasPS4DLLImportExport()) {
3869 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3870 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3871 if (RD->hasAttr<DLLExportAttr>() ||
3872 CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
3873 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
3874 }
3875 }
3876 }
3877
3878 // If there's already an old global variable, replace it with the new one.
3879 if (OldGV) {
3880 GV->takeName(OldGV);
3881 llvm::Constant *NewPtr =
3882 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3883 OldGV->replaceAllUsesWith(NewPtr);
3884 OldGV->eraseFromParent();
3885 }
3886
3887 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3888 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3889
3890 CharUnits Align =
3891 CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3892 GV->setAlignment(Align.getAsAlign());
3893
3894 // The Itanium ABI specifies that type_info objects must be globally
3895 // unique, with one exception: if the type is an incomplete class
3896 // type or a (possibly indirect) pointer to one. That exception
3897 // affects the general case of comparing type_info objects produced
3898 // by the typeid operator, which is why the comparison operators on
3899 // std::type_info generally use the type_info name pointers instead
3900 // of the object addresses. However, the language's built-in uses
3901 // of RTTI generally require class types to be complete, even when
3902 // manipulating pointers to those class types. This allows the
3903 // implementation of dynamic_cast to rely on address equality tests,
3904 // which is much faster.
3905
3906 // All of this is to say that it's important that both the type_info
3907 // object and the type_info name be uniqued when weakly emitted.
3908
3909 TypeName->setVisibility(Visibility);
3910 CGM.setDSOLocal(TypeName);
3911
3912 GV->setVisibility(Visibility);
3913 CGM.setDSOLocal(GV);
3914
3915 TypeName->setDLLStorageClass(DLLStorageClass);
3916 GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3917 ? GVDLLStorageClass
3918 : DLLStorageClass);
3919
3920 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3921 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3922
3923 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3924 }
3925
3926 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3927 /// for the given Objective-C object type.
BuildObjCObjectTypeInfo(const ObjCObjectType * OT)3928 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3929 // Drop qualifiers.
3930 const Type *T = OT->getBaseType().getTypePtr();
3931 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3932
3933 // The builtin types are abi::__class_type_infos and don't require
3934 // extra fields.
3935 if (isa<BuiltinType>(T)) return;
3936
3937 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3938 ObjCInterfaceDecl *Super = Class->getSuperClass();
3939
3940 // Root classes are also __class_type_info.
3941 if (!Super) return;
3942
3943 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3944
3945 // Everything else is single inheritance.
3946 llvm::Constant *BaseTypeInfo =
3947 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3948 Fields.push_back(BaseTypeInfo);
3949 }
3950
/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
  // Itanium C++ ABI 2.9.5p6b:
  //   It adds to abi::__class_type_info a single member pointing to the
  //   type_info structure for the base type,
  // Callers guarantee (via CanUseSingleInheritance) that RD has exactly one
  // relevant base, so bases_begin() is the base in question.
  llvm::Constant *BaseTypeInfo =
    ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
  Fields.push_back(BaseTypeInfo);
}
3961
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy. Used while computing abi::__vmi_class_type_info
  /// flags to detect diamond-shaped and non-diamond repeated inheritance.
  struct SeenBases {
    // Classes already encountered as non-virtual bases.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    // Classes already encountered as virtual bases.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
3970
3971 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3972 /// abi::__vmi_class_type_info.
3973 ///
ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier * Base,SeenBases & Bases)3974 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3975 SeenBases &Bases) {
3976
3977 unsigned Flags = 0;
3978
3979 auto *BaseDecl =
3980 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3981
3982 if (Base->isVirtual()) {
3983 // Mark the virtual base as seen.
3984 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3985 // If this virtual base has been seen before, then the class is diamond
3986 // shaped.
3987 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3988 } else {
3989 if (Bases.NonVirtualBases.count(BaseDecl))
3990 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3991 }
3992 } else {
3993 // Mark the non-virtual base as seen.
3994 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3995 // If this non-virtual base has been seen before, then the class has non-
3996 // diamond shaped repeated inheritance.
3997 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3998 } else {
3999 if (Bases.VirtualBases.count(BaseDecl))
4000 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4001 }
4002 }
4003
4004 // Walk all bases.
4005 for (const auto &I : BaseDecl->bases())
4006 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4007
4008 return Flags;
4009 }
4010
ComputeVMIClassTypeInfoFlags(const CXXRecordDecl * RD)4011 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4012 unsigned Flags = 0;
4013 SeenBases Bases;
4014
4015 // Walk all bases.
4016 for (const auto &I : RD->bases())
4017 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4018
4019 return Flags;
4020 }
4021
4022 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4023 /// classes with bases that do not satisfy the abi::__si_class_type_info
4024 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
BuildVMIClassTypeInfo(const CXXRecordDecl * RD)4025 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4026 llvm::Type *UnsignedIntLTy =
4027 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4028
4029 // Itanium C++ ABI 2.9.5p6c:
4030 // __flags is a word with flags describing details about the class
4031 // structure, which may be referenced by using the __flags_masks
4032 // enumeration. These flags refer to both direct and indirect bases.
4033 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4034 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4035
4036 // Itanium C++ ABI 2.9.5p6c:
4037 // __base_count is a word with the number of direct proper base class
4038 // descriptions that follow.
4039 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4040
4041 if (!RD->getNumBases())
4042 return;
4043
4044 // Now add the base class descriptions.
4045
4046 // Itanium C++ ABI 2.9.5p6c:
4047 // __base_info[] is an array of base class descriptions -- one for every
4048 // direct proper base. Each description is of the type:
4049 //
4050 // struct abi::__base_class_type_info {
4051 // public:
4052 // const __class_type_info *__base_type;
4053 // long __offset_flags;
4054 //
4055 // enum __offset_flags_masks {
4056 // __virtual_mask = 0x1,
4057 // __public_mask = 0x2,
4058 // __offset_shift = 8
4059 // };
4060 // };
4061
4062 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4063 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4064 // LLP64 platforms.
4065 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4066 // LLP64 platforms.
4067 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4068 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4069 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
4070 OffsetFlagsTy = CGM.getContext().LongLongTy;
4071 llvm::Type *OffsetFlagsLTy =
4072 CGM.getTypes().ConvertType(OffsetFlagsTy);
4073
4074 for (const auto &Base : RD->bases()) {
4075 // The __base_type member points to the RTTI for the base type.
4076 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4077
4078 auto *BaseDecl =
4079 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4080
4081 int64_t OffsetFlags = 0;
4082
4083 // All but the lower 8 bits of __offset_flags are a signed offset.
4084 // For a non-virtual base, this is the offset in the object of the base
4085 // subobject. For a virtual base, this is the offset in the virtual table of
4086 // the virtual base offset for the virtual base referenced (negative).
4087 CharUnits Offset;
4088 if (Base.isVirtual())
4089 Offset =
4090 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4091 else {
4092 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4093 Offset = Layout.getBaseClassOffset(BaseDecl);
4094 };
4095
4096 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4097
4098 // The low-order byte of __offset_flags contains flags, as given by the
4099 // masks from the enumeration __offset_flags_masks.
4100 if (Base.isVirtual())
4101 OffsetFlags |= BCTI_Virtual;
4102 if (Base.getAccessSpecifier() == AS_public)
4103 OffsetFlags |= BCTI_Public;
4104
4105 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4106 }
4107 }
4108
4109 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4110 /// pieces from \p Type.
extractPBaseFlags(ASTContext & Ctx,QualType & Type)4111 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4112 unsigned Flags = 0;
4113
4114 if (Type.isConstQualified())
4115 Flags |= ItaniumRTTIBuilder::PTI_Const;
4116 if (Type.isVolatileQualified())
4117 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4118 if (Type.isRestrictQualified())
4119 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4120 Type = Type.getUnqualifiedType();
4121
4122 // Itanium C++ ABI 2.9.5p7:
4123 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4124 // incomplete class type, the incomplete target type flag is set.
4125 if (ContainsIncompleteClassType(Type))
4126 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4127
4128 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4129 if (Proto->isNothrow()) {
4130 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4131 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4132 }
4133 }
4134
4135 return Flags;
4136 }
4137
4138 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4139 /// used for pointer types.
BuildPointerTypeInfo(QualType PointeeTy)4140 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4141 // Itanium C++ ABI 2.9.5p7:
4142 // __flags is a flag word describing the cv-qualification and other
4143 // attributes of the type pointed to
4144 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4145
4146 llvm::Type *UnsignedIntLTy =
4147 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4148 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4149
4150 // Itanium C++ ABI 2.9.5p7:
4151 // __pointee is a pointer to the std::type_info derivation for the
4152 // unqualified type being pointed to.
4153 llvm::Constant *PointeeTypeInfo =
4154 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4155 Fields.push_back(PointeeTypeInfo);
4156 }
4157
4158 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4159 /// struct, used for member pointer types.
4160 void
BuildPointerToMemberTypeInfo(const MemberPointerType * Ty)4161 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4162 QualType PointeeTy = Ty->getPointeeType();
4163
4164 // Itanium C++ ABI 2.9.5p7:
4165 // __flags is a flag word describing the cv-qualification and other
4166 // attributes of the type pointed to.
4167 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4168
4169 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4170 if (IsIncompleteClassType(ClassType))
4171 Flags |= PTI_ContainingClassIncomplete;
4172
4173 llvm::Type *UnsignedIntLTy =
4174 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4175 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4176
4177 // Itanium C++ ABI 2.9.5p7:
4178 // __pointee is a pointer to the std::type_info derivation for the
4179 // unqualified type being pointed to.
4180 llvm::Constant *PointeeTypeInfo =
4181 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4182 Fields.push_back(PointeeTypeInfo);
4183
4184 // Itanium C++ ABI 2.9.5p9:
4185 // __context is a pointer to an abi::__class_type_info corresponding to the
4186 // class type containing the member pointed to
4187 // (e.g., the "A" in "int A::*").
4188 Fields.push_back(
4189 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4190 }
4191
getAddrOfRTTIDescriptor(QualType Ty)4192 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4193 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4194 }
4195
EmitFundamentalRTTIDescriptors(const CXXRecordDecl * RD)4196 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4197 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4198 QualType FundamentalTypes[] = {
4199 getContext().VoidTy, getContext().NullPtrTy,
4200 getContext().BoolTy, getContext().WCharTy,
4201 getContext().CharTy, getContext().UnsignedCharTy,
4202 getContext().SignedCharTy, getContext().ShortTy,
4203 getContext().UnsignedShortTy, getContext().IntTy,
4204 getContext().UnsignedIntTy, getContext().LongTy,
4205 getContext().UnsignedLongTy, getContext().LongLongTy,
4206 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4207 getContext().UnsignedInt128Ty, getContext().HalfTy,
4208 getContext().FloatTy, getContext().DoubleTy,
4209 getContext().LongDoubleTy, getContext().Float128Ty,
4210 getContext().Char8Ty, getContext().Char16Ty,
4211 getContext().Char32Ty
4212 };
4213 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4214 RD->hasAttr<DLLExportAttr>()
4215 ? llvm::GlobalValue::DLLExportStorageClass
4216 : llvm::GlobalValue::DefaultStorageClass;
4217 llvm::GlobalValue::VisibilityTypes Visibility =
4218 CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4219 for (const QualType &FundamentalType : FundamentalTypes) {
4220 QualType PointerType = getContext().getPointerType(FundamentalType);
4221 QualType PointerTypeConst = getContext().getPointerType(
4222 FundamentalType.withConst());
4223 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4224 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4225 Type, llvm::GlobalValue::ExternalLinkage,
4226 Visibility, DLLStorageClass);
4227 }
4228 }
4229
4230 /// What sort of uniqueness rules should we use for the RTTI for the
4231 /// given type?
classifyRTTIUniqueness(QualType CanTy,llvm::GlobalValue::LinkageTypes Linkage) const4232 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4233 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4234 if (shouldRTTIBeUnique())
4235 return RUK_Unique;
4236
4237 // It's only necessary for linkonce_odr or weak_odr linkage.
4238 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4239 Linkage != llvm::GlobalValue::WeakODRLinkage)
4240 return RUK_Unique;
4241
4242 // It's only necessary with default visibility.
4243 if (CanTy->getVisibility() != DefaultVisibility)
4244 return RUK_Unique;
4245
4246 // If we're not required to publish this symbol, hide it.
4247 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4248 return RUK_NonUniqueHidden;
4249
4250 // If we're required to publish this symbol, as we might be under an
4251 // explicit instantiation, leave it with default visibility but
4252 // enable string-comparisons.
4253 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4254 return RUK_NonUniqueVisible;
4255 }
4256
// Find out how to codegen the complete destructor and constructor
namespace {
// How a complete structor relates to its base variant:
//   Emit   - emit a separate function body for it.
//   RAUW   - emit nothing; replace all uses with the base variant.
//   Alias  - emit it as an alias to the base variant.
//   COMDAT - emit it in a shared C5/D5 comdat group (ELF/wasm only).
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
getCodegenToUse(CodeGenModule & CGM,const CXXMethodDecl * MD)4261 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4262 const CXXMethodDecl *MD) {
4263 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4264 return StructorCodegen::Emit;
4265
4266 // The complete and base structors are not equivalent if there are any virtual
4267 // bases, so emit separate functions.
4268 if (MD->getParent()->getNumVBases())
4269 return StructorCodegen::Emit;
4270
4271 GlobalDecl AliasDecl;
4272 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4273 AliasDecl = GlobalDecl(DD, Dtor_Complete);
4274 } else {
4275 const auto *CD = cast<CXXConstructorDecl>(MD);
4276 AliasDecl = GlobalDecl(CD, Ctor_Complete);
4277 }
4278 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4279
4280 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4281 return StructorCodegen::RAUW;
4282
4283 // FIXME: Should we allow available_externally aliases?
4284 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4285 return StructorCodegen::RAUW;
4286
4287 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4288 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4289 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4290 CGM.getTarget().getTriple().isOSBinFormatWasm())
4291 return StructorCodegen::COMDAT;
4292 return StructorCodegen::Emit;
4293 }
4294
4295 return StructorCodegen::Alias;
4296 }
4297
/// Emit AliasDecl as an alias of the function emitted for TargetDecl,
/// redirecting any existing declaration with the aliased mangled name.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // If a *definition* with this mangled name already exists, leave it alone;
  // a mere declaration (Entry) will be replaced below.
  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias. Order matters: take the name
  // first so RAUW produces uses of a correctly-named value, then erase the
  // now-unused declaration.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}
4330
/// Emit the constructor or destructor variant identified by GD, folding the
/// complete variant into the base variant (alias/RAUW/comdat) when possible.
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  // Exactly one of CD/DD is non-null.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // A complete variant may avoid its own body entirely by referring to the
  // base variant.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      // Defer: record that uses of the complete variant's name should be
      // replaced by the base variant.
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Put the body in the shared C5/D5 comdat group for this structor.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}
4395
getBeginCatchFn(CodeGenModule & CGM)4396 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4397 // void *__cxa_begin_catch(void*);
4398 llvm::FunctionType *FTy = llvm::FunctionType::get(
4399 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4400
4401 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4402 }
4403
getEndCatchFn(CodeGenModule & CGM)4404 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4405 // void __cxa_end_catch();
4406 llvm::FunctionType *FTy =
4407 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4408
4409 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4410 }
4411
getGetExceptionPtrFn(CodeGenModule & CGM)4412 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4413 // void *__cxa_get_exception_ptr(void*);
4414 llvm::FunctionType *FTy = llvm::FunctionType::get(
4415 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4416
4417 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4418 }
4419
4420 namespace {
4421 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4422 /// exception type lets us state definitively that the thrown exception
4423 /// type does not have a destructor. In particular:
4424 /// - Catch-alls tell us nothing, so we have to conservatively
4425 /// assume that the thrown exception might have a destructor.
4426 /// - Catches by reference behave according to their base types.
4427 /// - Catches of non-record types will only trigger for exceptions
4428 /// of non-record types, which never have destructors.
4429 /// - Catches of record types can trigger for arbitrary subclasses
4430 /// of the caught type, so we have to assume the actual thrown
4431 /// exception type might have a throwing destructor, even if the
4432 /// caught type's destructor is trivial or nothrow.
4433 struct CallEndCatch final : EHScopeStack::Cleanup {
CallEndCatch__anon2589cd5a0911::CallEndCatch4434 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4435 bool MightThrow;
4436
Emit__anon2589cd5a0911::CallEndCatch4437 void Emit(CodeGenFunction &CGF, Flags flags) override {
4438 if (!MightThrow) {
4439 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4440 return;
4441 }
4442
4443 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4444 }
4445 };
4446 }
4447
4448 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4449 /// __cxa_end_catch.
4450 ///
4451 /// \param EndMightThrow - true if __cxa_end_catch might throw
CallBeginCatch(CodeGenFunction & CGF,llvm::Value * Exn,bool EndMightThrow)4452 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4453 llvm::Value *Exn,
4454 bool EndMightThrow) {
4455 llvm::CallInst *call =
4456 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4457
4458 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4459
4460 return call;
4461 }
4462
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// Handles the three shapes of catch parameter: catch-by-reference,
/// scalar/complex catches, and aggregate (record) catches, each of which
/// interacts differently with __cxa_begin_catch.
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    // Only record types can require running a destructor in __cxa_end_catch.
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);

        // However, if we're catching a pointer-to-record type that won't
        // work, because the personality function might have adjusted
        // the pointer. There's actually no way for us to fully satisfy
        // the language/ABI contract here: we can't use Exn because it
        // might have the wrong adjustment, but we can't use the by-value
        // pointer because it's off by a level of abstraction.
        //
        // The current solution is to dump the adjusted pointer into an
        // alloca, which breaks language semantics (because changing the
        // pointer doesn't change the exception) but at least works.
        // The better solution would be to filter out non-exact matches
        // and rethrow them, but this is tricky because the rethrow
        // really needs to be catchable by other sites at this landing
        // pad. The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy =
          cast<llvm::PointerType>(LLVMCatchTy)->getElementType();

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    // Non-record catches never need a throwing __cxa_end_catch.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        // Retain, then store like the non-ownership cases below.
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        LLVM_FALLTHROUGH;

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression. If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
4644
4645 /// Begins a catch statement by initializing the catch variable and
4646 /// calling __cxa_begin_catch.
emitBeginCatch(CodeGenFunction & CGF,const CXXCatchStmt * S)4647 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4648 const CXXCatchStmt *S) {
4649 // We have to be very careful with the ordering of cleanups here:
4650 // C++ [except.throw]p4:
4651 // The destruction [of the exception temporary] occurs
4652 // immediately after the destruction of the object declared in
4653 // the exception-declaration in the handler.
4654 //
4655 // So the precise ordering is:
4656 // 1. Construct catch variable.
4657 // 2. __cxa_begin_catch
4658 // 3. Enter __cxa_end_catch cleanup
4659 // 4. Enter dtor cleanup
4660 //
4661 // We do this by using a slightly abnormal initialization process.
4662 // Delegation sequence:
4663 // - ExitCXXTryStmt opens a RunCleanupsScope
4664 // - EmitAutoVarAlloca creates the variable and debug info
4665 // - InitCatchParam initializes the variable from the exception
4666 // - CallBeginCatch calls __cxa_begin_catch
4667 // - CallBeginCatch enters the __cxa_end_catch cleanup
4668 // - EmitAutoVarCleanups enters the variable destructor cleanup
4669 // - EmitCXXTryStmt emits the code for the catch body
4670 // - EmitCXXTryStmt close the RunCleanupsScope
4671
4672 VarDecl *CatchParam = S->getExceptionDecl();
4673 if (!CatchParam) {
4674 llvm::Value *Exn = CGF.getExceptionFromSlot();
4675 CallBeginCatch(CGF, Exn, true);
4676 return;
4677 }
4678
4679 // Emit the local.
4680 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4681 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4682 CGF.EmitAutoVarCleanups(var);
4683 }
4684
4685 /// Get or define the following function:
4686 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
4687 /// This code is used only in C++.
getClangCallTerminateFn(CodeGenModule & CGM)4688 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4689 llvm::FunctionType *fnTy =
4690 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4691 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4692 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4693 llvm::Function *fn =
4694 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4695 if (fn->empty()) {
4696 fn->setDoesNotThrow();
4697 fn->setDoesNotReturn();
4698
4699 // What we really want is to massively penalize inlining without
4700 // forbidding it completely. The difference between that and
4701 // 'noinline' is negligible.
4702 fn->addFnAttr(llvm::Attribute::NoInline);
4703
4704 // Allow this function to be shared across translation units, but
4705 // we don't want it to turn into an exported symbol.
4706 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4707 fn->setVisibility(llvm::Function::HiddenVisibility);
4708 if (CGM.supportsCOMDAT())
4709 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4710
4711 // Set up the function.
4712 llvm::BasicBlock *entry =
4713 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4714 CGBuilderTy builder(CGM, entry);
4715
4716 // Pull the exception pointer out of the parameter list.
4717 llvm::Value *exn = &*fn->arg_begin();
4718
4719 // Call __cxa_begin_catch(exn).
4720 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4721 catchCall->setDoesNotThrow();
4722 catchCall->setCallingConv(CGM.getRuntimeCC());
4723
4724 // Call std::terminate().
4725 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4726 termCall->setDoesNotThrow();
4727 termCall->setDoesNotReturn();
4728 termCall->setCallingConv(CGM.getRuntimeCC());
4729
4730 // std::terminate cannot return.
4731 builder.CreateUnreachable();
4732 }
4733 return fnRef;
4734 }
4735
4736 llvm::CallInst *
emitTerminateForUnexpectedException(CodeGenFunction & CGF,llvm::Value * Exn)4737 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4738 llvm::Value *Exn) {
4739 // In C++, we want to call __cxa_begin_catch() before terminating.
4740 if (Exn) {
4741 assert(CGF.CGM.getLangOpts().CPlusPlus);
4742 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4743 }
4744 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4745 }
4746
4747 std::pair<llvm::Value *, const CXXRecordDecl *>
LoadVTablePtr(CodeGenFunction & CGF,Address This,const CXXRecordDecl * RD)4748 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4749 const CXXRecordDecl *RD) {
4750 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4751 }
4752
emitBeginCatch(CodeGenFunction & CGF,const CXXCatchStmt * C)4753 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4754 const CXXCatchStmt *C) {
4755 if (CGF.getTarget().hasFeature("exception-handling"))
4756 CGF.EHStack.pushCleanup<CatchRetScope>(
4757 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4758 ItaniumCXXABI::emitBeginCatch(CGF, C);
4759 }
4760
4761 llvm::CallInst *
emitTerminateForUnexpectedException(CodeGenFunction & CGF,llvm::Value * Exn)4762 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4763 llvm::Value *Exn) {
4764 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4765 // the violating exception to mark it handled, but it is currently hard to do
4766 // with wasm EH instruction structure with catch/catch_all, we just call
4767 // std::terminate and ignore the violating exception as in CGCXXABI.
4768 // TODO Consider code transformation that makes calling __clang_call_terminate
4769 // possible.
4770 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4771 }
4772
4773 /// Register a global destructor as best as we know how.
registerGlobalDtor(CodeGenFunction & CGF,const VarDecl & D,llvm::FunctionCallee Dtor,llvm::Constant * Addr)4774 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4775 llvm::FunctionCallee Dtor,
4776 llvm::Constant *Addr) {
4777 if (D.getTLSKind() != VarDecl::TLS_None) {
4778 // atexit routine expects "int(*)(int,...)"
4779 llvm::FunctionType *FTy =
4780 llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
4781 llvm::PointerType *FpTy = FTy->getPointerTo();
4782
4783 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4784 llvm::FunctionType *AtExitTy =
4785 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
4786
4787 // Fetch the actual function.
4788 llvm::FunctionCallee AtExit =
4789 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4790
4791 // Create __dtor function for the var decl.
4792 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4793
4794 // Register above __dtor with atexit().
4795 // First param is flags and must be 0, second param is function ptr
4796 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4797 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4798
4799 // Cannot unregister TLS __dtor so done
4800 return;
4801 }
4802
4803 // Create __dtor function for the var decl.
4804 llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4805
4806 // Register above __dtor with atexit().
4807 CGF.registerGlobalDtorWithAtExit(DtorStub);
4808
4809 // Emit __finalize function to unregister __dtor and (as appropriate) call
4810 // __dtor.
4811 emitCXXStermFinalizer(D, DtorStub, Addr);
4812 }
4813
emitCXXStermFinalizer(const VarDecl & D,llvm::Function * dtorStub,llvm::Constant * addr)4814 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4815 llvm::Constant *addr) {
4816 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4817 SmallString<256> FnName;
4818 {
4819 llvm::raw_svector_ostream Out(FnName);
4820 getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4821 }
4822
4823 // Create the finalization action associated with a variable.
4824 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4825 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4826 FTy, FnName.str(), FI, D.getLocation());
4827
4828 CodeGenFunction CGF(CGM);
4829
4830 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4831 FunctionArgList(), D.getLocation(),
4832 D.getInit()->getExprLoc());
4833
4834 // The unatexit subroutine unregisters __dtor functions that were previously
4835 // registered by the atexit subroutine. If the referenced function is found,
4836 // the unatexit returns a value of 0, meaning that the cleanup is still
4837 // pending (and we should call the __dtor function).
4838 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4839
4840 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4841
4842 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4843 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4844
4845 // Check if unatexit returns a value of 0. If it does, jump to
4846 // DestructCallBlock, otherwise jump to EndBlock directly.
4847 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4848
4849 CGF.EmitBlock(DestructCallBlock);
4850
4851 // Emit the call to dtorStub.
4852 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4853
4854 // Make sure the call and the callee agree on calling convention.
4855 CI->setCallingConv(dtorStub->getCallingConv());
4856
4857 CGF.EmitBlock(EndBlock);
4858
4859 CGF.FinishFunction();
4860
4861 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4862 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4863 IPA->getPriority());
4864 } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4865 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4866 // According to C++ [basic.start.init]p2, class template static data
4867 // members (i.e., implicitly or explicitly instantiated specializations)
4868 // have unordered initialization. As a consequence, we can put them into
4869 // their own llvm.global_dtors entry.
4870 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4871 } else {
4872 CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4873 }
4874 }
4875