1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "TargetInfo.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CGValue.h"
19 #include "CodeGenFunction.h"
20 #include "clang/AST/RecordLayout.h"
21 #include "clang/CodeGen/CGFunctionInfo.h"
22 #include "clang/Frontend/CodeGenOptions.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/Triple.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/Type.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include <algorithm>    // std::sort
29 
30 using namespace clang;
31 using namespace CodeGen;
32 
AssignToArrayRange(CodeGen::CGBuilderTy & Builder,llvm::Value * Array,llvm::Value * Value,unsigned FirstIndex,unsigned LastIndex)33 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
34                                llvm::Value *Array,
35                                llvm::Value *Value,
36                                unsigned FirstIndex,
37                                unsigned LastIndex) {
38   // Alternatively, we could emit this as a loop in the source.
39   for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
40     llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
41     Builder.CreateStore(Value, Cell);
42   }
43 }
44 
isAggregateTypeForABI(QualType T)45 static bool isAggregateTypeForABI(QualType T) {
46   return !CodeGenFunction::hasScalarEvaluationKind(T) ||
47          T->isMemberFunctionPointerType();
48 }
49 
// Out-of-line destructor; no cleanup needed (presumably defined here to anchor
// the vtable in this translation unit — TODO confirm against the header).
ABIInfo::~ABIInfo() {}
51 
getRecordArgABI(const RecordType * RT,CGCXXABI & CXXABI)52 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
53                                               CGCXXABI &CXXABI) {
54   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
55   if (!RD)
56     return CGCXXABI::RAA_Default;
57   return CXXABI.getRecordArgABI(RD);
58 }
59 
getRecordArgABI(QualType T,CGCXXABI & CXXABI)60 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
61                                               CGCXXABI &CXXABI) {
62   const RecordType *RT = T->getAs<RecordType>();
63   if (!RT)
64     return CGCXXABI::RAA_Default;
65   return getRecordArgABI(RT, CXXABI);
66 }
67 
68 /// Pass transparent unions as if they were the type of the first element. Sema
69 /// should ensure that all elements of the union have the same "machine type".
useFirstFieldIfTransparentUnion(QualType Ty)70 static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
71   if (const RecordType *UT = Ty->getAsUnionType()) {
72     const RecordDecl *UD = UT->getDecl();
73     if (UD->hasAttr<TransparentUnionAttr>()) {
74       assert(!UD->field_empty() && "sema created an empty transparent union");
75       return UD->field_begin()->getType();
76     }
77   }
78   return Ty;
79 }
80 
getCXXABI() const81 CGCXXABI &ABIInfo::getCXXABI() const {
82   return CGT.getCXXABI();
83 }
84 
getContext() const85 ASTContext &ABIInfo::getContext() const {
86   return CGT.getContext();
87 }
88 
getVMContext() const89 llvm::LLVMContext &ABIInfo::getVMContext() const {
90   return CGT.getLLVMContext();
91 }
92 
getDataLayout() const93 const llvm::DataLayout &ABIInfo::getDataLayout() const {
94   return CGT.getDataLayout();
95 }
96 
getTarget() const97 const TargetInfo &ABIInfo::getTarget() const {
98   return CGT.getTarget();
99 }
100 
// Default: no type qualifies as a homogeneous-aggregate base type. Targets
// with homogeneous-aggregate passing rules override this hook.
bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}
104 
// Default: no member count qualifies as a small-enough homogeneous aggregate.
// Targets with homogeneous-aggregate passing rules override this hook.
bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}
109 
dump() const110 void ABIArgInfo::dump() const {
111   raw_ostream &OS = llvm::errs();
112   OS << "(ABIArgInfo Kind=";
113   switch (TheKind) {
114   case Direct:
115     OS << "Direct Type=";
116     if (llvm::Type *Ty = getCoerceToType())
117       Ty->print(OS);
118     else
119       OS << "null";
120     break;
121   case Extend:
122     OS << "Extend";
123     break;
124   case Ignore:
125     OS << "Ignore";
126     break;
127   case InAlloca:
128     OS << "InAlloca Offset=" << getInAllocaFieldIndex();
129     break;
130   case Indirect:
131     OS << "Indirect Align=" << getIndirectAlign()
132        << " ByVal=" << getIndirectByVal()
133        << " Realign=" << getIndirectRealign();
134     break;
135   case Expand:
136     OS << "Expand";
137     break;
138   }
139   OS << ")\n";
140 }
141 
// TargetCodeGenInfo owns its ABIInfo (raw pointer): release it on destruction.
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
143 
// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
/// Default size, in bytes, assumed for the in-memory unwind exception object.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}
155 
/// Whether a call through an unprototyped function type may be treated as
/// variadic. Conservatively false by default; targets opt in where safe.
bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}
164 
165 void
getDependentLibraryOption(llvm::StringRef Lib,llvm::SmallString<24> & Opt) const166 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
167                                              llvm::SmallString<24> &Opt) const {
168   // This assumes the user is passing a library name like "rt" instead of a
169   // filename like "librt.a/so", and that they don't care whether it's static or
170   // dynamic.
171   Opt = "-l";
172   Opt += Lib;
173 }
174 
175 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
176 
177 /// isEmptyField - Return true iff a the field is "empty", that is it
178 /// is an unnamed bit-field or an (array of) empty record(s).
isEmptyField(ASTContext & Context,const FieldDecl * FD,bool AllowArrays)179 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
180                          bool AllowArrays) {
181   if (FD->isUnnamedBitfield())
182     return true;
183 
184   QualType FT = FD->getType();
185 
186   // Constant arrays of empty records count as empty, strip them off.
187   // Constant arrays of zero length always count as empty.
188   if (AllowArrays)
189     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
190       if (AT->getSize() == 0)
191         return true;
192       FT = AT->getElementType();
193     }
194 
195   const RecordType *RT = FT->getAs<RecordType>();
196   if (!RT)
197     return false;
198 
199   // C++ record fields are never empty, at least in the Itanium ABI.
200   //
201   // FIXME: We should use a predicate for whether this behavior is true in the
202   // current ABI.
203   if (isa<CXXRecordDecl>(RT->getDecl()))
204     return false;
205 
206   return isEmptyRecord(Context, FT, AllowArrays);
207 }
208 
209 /// isEmptyRecord - Return true iff a structure contains only empty
210 /// fields. Note that a structure with a flexible array member is not
211 /// considered empty.
isEmptyRecord(ASTContext & Context,QualType T,bool AllowArrays)212 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
213   const RecordType *RT = T->getAs<RecordType>();
214   if (!RT)
215     return 0;
216   const RecordDecl *RD = RT->getDecl();
217   if (RD->hasFlexibleArrayMember())
218     return false;
219 
220   // If this is a C++ record, check the bases first.
221   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
222     for (const auto &I : CXXRD->bases())
223       if (!isEmptyRecord(Context, I.getType(), true))
224         return false;
225 
226   for (const auto *I : RD->fields())
227     if (!isEmptyField(Context, I, AllowArrays))
228       return false;
229   return true;
230 }
231 
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  // The candidate element found so far (null until one is seen).
  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      // A scalar field is itself the candidate element.
      Found = FT.getTypePtr();
    } else {
      // An aggregate field only qualifies if it is itself a single-element
      // struct; recurse to find its element.
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
306 
is32Or64BitBasicType(QualType Ty,ASTContext & Context)307 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
308   // Treat complex types as the element type.
309   if (const ComplexType *CTy = Ty->getAs<ComplexType>())
310     Ty = CTy->getElementType();
311 
312   // Check for a type which we know has a simple scalar argument-passing
313   // convention without any padding.  (We're specifically looking for 32
314   // and 64-bit integer and integer-equivalents, float, and double.)
315   if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
316       !Ty->isEnumeralType() && !Ty->isBlockPointerType())
317     return false;
318 
319   uint64_t Size = Context.getTypeSize(Ty);
320   return Size == 32 || Size == 64;
321 }
322 
323 /// canExpandIndirectArgument - Test whether an argument type which is to be
324 /// passed indirectly (on the stack) would have the equivalent layout if it was
325 /// expanded into separate arguments. If so, we prefer to do the latter to avoid
326 /// inhibiting optimizations.
327 ///
328 // FIXME: This predicate is missing many cases, currently it just follows
329 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
330 // should probably make this smarter, or better yet make the LLVM backend
331 // capable of handling it.
canExpandIndirectArgument(QualType Ty,ASTContext & Context)332 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
333   // We can only expand structure types.
334   const RecordType *RT = Ty->getAs<RecordType>();
335   if (!RT)
336     return false;
337 
338   // We can only expand (C) structures.
339   //
340   // FIXME: This needs to be generalized to handle classes as well.
341   const RecordDecl *RD = RT->getDecl();
342   if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
343     return false;
344 
345   uint64_t Size = 0;
346 
347   for (const auto *FD : RD->fields()) {
348     if (!is32Or64BitBasicType(FD->getType(), Context))
349       return false;
350 
351     // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
352     // how to expand them yet, and the predicate for telling if a bitfield still
353     // counts as "basic" is more complicated than what we were doing previously.
354     if (FD->isBitField())
355       return false;
356 
357     Size += Context.getTypeSize(FD->getType());
358   }
359 
360   // Make sure there are not any holes in the struct.
361   if (Size != Context.getTypeSize(Ty))
362     return false;
363 
364   return true;
365 }
366 
367 namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  // Classify the return type / a single argument type for this ABI.
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  // Classify the whole signature: return first (unless the C++ ABI already
  // decided it), then each argument independently.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
389 
/// Target codegen info that simply installs DefaultABIInfo; no target hooks
/// are overridden.
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
395 
// va_arg lowering is not implemented for the default ABI; callers receive a
// null value.
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}
400 
classifyArgumentType(QualType Ty) const401 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
402   if (isAggregateTypeForABI(Ty))
403     return ABIArgInfo::getIndirect(0);
404 
405   // Treat an enum type as its underlying type.
406   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
407     Ty = EnumTy->getDecl()->getIntegerType();
408 
409   return (Ty->isPromotableIntegerType() ?
410           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
411 }
412 
classifyReturnType(QualType RetTy) const413 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
414   if (RetTy->isVoidType())
415     return ABIArgInfo::getIgnore();
416 
417   if (isAggregateTypeForABI(RetTy))
418     return ABIArgInfo::getIndirect(0);
419 
420   // Treat an enum type as its underlying type.
421   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
422     RetTy = EnumTy->getDecl()->getIntegerType();
423 
424   return (RetTy->isPromotableIntegerType() ?
425           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
426 }
427 
428 //===----------------------------------------------------------------------===//
429 // le32/PNaCl bitcode ABI Implementation
430 //
431 // This is a simplified version of the x86_32 ABI.  Arguments and return values
432 // are always passed on the stack.
433 //===----------------------------------------------------------------------===//
434 
/// ABI info for le32/PNaCl bitcode: aggregates always go on the stack (see
/// the banner comment above).
class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  // Classify the return type / a single argument type for this ABI.
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
446 
/// Target codegen info that simply installs PNaClABIInfo; no target hooks are
/// overridden.
class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};
452 
computeInfo(CGFunctionInfo & FI) const453 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
454   if (!getCXXABI().classifyReturnType(FI))
455     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
456 
457   for (auto &I : FI.arguments())
458     I.info = classifyArgumentType(I.type);
459 }
460 
// va_arg lowering is not implemented for the PNaCl ABI; callers receive a
// null value.
llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}
465 
466 /// \brief Classify argument of given type \p Ty.
classifyArgumentType(QualType Ty) const467 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
468   if (isAggregateTypeForABI(Ty)) {
469     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
470       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
471     return ABIArgInfo::getIndirect(0);
472   } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
473     // Treat an enum type as its underlying type.
474     Ty = EnumTy->getDecl()->getIntegerType();
475   } else if (Ty->isFloatingType()) {
476     // Floating-point types don't go inreg.
477     return ABIArgInfo::getDirect();
478   }
479 
480   return (Ty->isPromotableIntegerType() ?
481           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
482 }
483 
classifyReturnType(QualType RetTy) const484 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
485   if (RetTy->isVoidType())
486     return ABIArgInfo::getIgnore();
487 
488   // In the PNaCl ABI we always return records/structures on the stack.
489   if (isAggregateTypeForABI(RetTy))
490     return ABIArgInfo::getIndirect(0);
491 
492   // Treat an enum type as its underlying type.
493   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
494     RetTy = EnumTy->getDecl()->getIntegerType();
495 
496   return (RetTy->isPromotableIntegerType() ?
497           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
498 }
499 
500 /// IsX86_MMXType - Return true if this is an MMX type.
IsX86_MMXType(llvm::Type * IRType)501 bool IsX86_MMXType(llvm::Type *IRType) {
502   // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
503   return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
504     cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
505     IRType->getScalarSizeInBits() != 64;
506 }
507 
X86AdjustInlineAsmType(CodeGen::CodeGenFunction & CGF,StringRef Constraint,llvm::Type * Ty)508 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
509                                           StringRef Constraint,
510                                           llvm::Type* Ty) {
511   if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
512     if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
513       // Invalid MMX constraint
514       return nullptr;
515     }
516 
517     return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
518   }
519 
520   // No operation needed
521   return Ty;
522 }
523 
524 /// Returns true if this type can be passed in SSE registers with the
525 /// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
isX86VectorTypeForVectorCall(ASTContext & Context,QualType Ty)526 static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
527   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
528     if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
529       return true;
530   } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
531     // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
532     // registers specially.
533     unsigned VecSize = Context.getTypeSize(VT);
534     if (VecSize == 128 || VecSize == 256 || VecSize == 512)
535       return true;
536   }
537   return false;
538 }
539 
/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  // vectorcall homogeneous aggregates may have at most four elements.
  const uint64_t MaxVectorCallMembers = 4;
  return NumMembers <= MaxVectorCallMembers;
}
545 
546 //===----------------------------------------------------------------------===//
547 // X86-32 ABI Implementation
548 //===----------------------------------------------------------------------===//
549 
/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;          // Calling convention being classified (llvm::CallingConv value).
  unsigned FreeRegs;    // Integer registers still available for arguments.
  unsigned FreeSSERegs; // SSE registers still available for arguments.
};
558 
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  // Register classes used when classifying scalar values.
  enum Class {
    Integer,
    Float
  };

  // Minimum byte alignment for arguments passed on the stack.
  static const unsigned MinABIStackAlignInBytes = 4;

  // ABI variant flags; all are set via the constructor by the creator
  // (see X86_32TargetCodeGenInfo).
  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  // Whether a scalar of this bit-size fits an x86-32 register pair scheme.
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  // Indirect-return classification; consumes a free register if available.
  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  // d/p/w/r map to the ABI variant flags above, in declaration order.
  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};
623 
/// Target codegen hooks for x86-32; installs X86_32ABIInfo and overrides the
/// inline-asm, DWARF EH, and UBSan-signature hooks.
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
      bool d, bool p, bool w, unsigned r)
    :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    // Delegate to the shared x86 handler (rewrites the MMX "y" constraint).
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    // 4-byte signature: a "jmp rel8" over the next two bytes followed by the
    // marker characters 'F','T'.
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};
668 
669 }
670 
/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Rewritten;
  const size_t Len = AsmString.size();
  size_t Pos = 0;

  while (Pos < Len) {
    // Find the next run of '$' characters and copy everything up to and
    // including that run verbatim.
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = Len;
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = Len;
    Rewritten.append(AsmString, Pos, DollarEnd - Pos);
    size_t NumDollars = DollarEnd - DollarStart;
    Pos = DollarEnd;

    // "$$" escapes a literal dollar, so only an odd-length run introduces an
    // operand reference.
    if (NumDollars % 2 != 0 && Pos < Len) {
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", Pos);
      if (DigitEnd == std::string::npos)
        DigitEnd = Len;

      // Parse the operand index, treating anything that doesn't fit in
      // 32 bits as unparseable (copied through unchanged, matching
      // StringRef::getAsInteger's failure behavior).
      unsigned long long Value = 0;
      bool Overflow = false;
      for (size_t I = Pos; I != DigitEnd; ++I) {
        Value = Value * 10 + unsigned(AsmString[I] - '0');
        if (Value > 0xFFFFFFFFull) {
          Overflow = true;
          break;
        }
      }

      if (Pos == DigitEnd || Overflow) {
        // No digits after the '$' (or an out-of-range index): copy as-is.
        Rewritten.append(AsmString, Pos, DigitEnd - Pos);
      } else {
        unsigned OperandIndex = static_cast<unsigned>(Value);
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        Rewritten += std::to_string(OperandIndex);
      }
      Pos = DigitEnd;
    }
  }

  AsmString = std::move(Rewritten);
}
715 
/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  // Existing input references must be renumbered past the output we just
  // appended.
  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
749 
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}
799 
getIndirectReturnResult(CCState & State) const800 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
801   // If the return value is indirect, then the hidden argument is consuming one
802   // integer register.
803   if (State.FreeRegs) {
804     --State.FreeRegs;
805     return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
806   }
807   return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
808 }
809 
// Classify how a value of type RetTy is returned under the x86-32 ABI,
// consuming an integer register from State when the hidden sret pointer
// occupies one.
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, CCState &State) const {
  // void returns nothing.
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // vectorcall: homogeneous vector aggregates are returned directly.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      // All other vectors are returned in memory.
      return getIndirectReturnResult(State);
    }

    // Non-Darwin: vectors are returned directly.
    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  // Scalars: promotable integers are extended, everything else is direct.
  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
888 
isSSEVectorType(ASTContext & Context,QualType Ty)889 static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
890   return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
891 }
892 
isRecordWithSSEVectorType(ASTContext & Context,QualType Ty)893 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
894   const RecordType *RT = Ty->getAs<RecordType>();
895   if (!RT)
896     return 0;
897   const RecordDecl *RD = RT->getDecl();
898 
899   // If this is a C++ record, check the bases first.
900   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
901     for (const auto &I : CXXRD->bases())
902       if (!isRecordWithSSEVectorType(Context, I.getType()))
903         return false;
904 
905   for (const auto *i : RD->fields()) {
906     QualType FT = i->getType();
907 
908     if (isSSEVectorType(Context, FT))
909       return true;
910 
911     if (isRecordWithSSEVectorType(Context, FT))
912       return true;
913   }
914 
915   return false;
916 }
917 
getTypeStackAlignInBytes(QualType Ty,unsigned Align) const918 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
919                                                  unsigned Align) const {
920   // Otherwise, if the alignment is less than or equal to the minimum ABI
921   // alignment, just use the default; the backend will handle this.
922   if (Align <= MinABIStackAlignInBytes)
923     return 0; // Use default alignment.
924 
925   // On non-Darwin, the stack type alignment is always 4.
926   if (!IsDarwinVectorABI) {
927     // Set explicit alignment, since we may need to realign the top.
928     return MinABIStackAlignInBytes;
929   }
930 
931   // Otherwise, if the type contains an SSE vector type, the alignment is 16.
932   if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
933                       isRecordWithSSEVectorType(getContext(), Ty)))
934     return 16;
935 
936   return MinABIStackAlignInBytes;
937 }
938 
getIndirectResult(QualType Ty,bool ByVal,CCState & State) const939 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
940                                             CCState &State) const {
941   if (!ByVal) {
942     if (State.FreeRegs) {
943       --State.FreeRegs; // Non-byval indirects just use one pointer.
944       return ABIArgInfo::getIndirectInReg(0, false);
945     }
946     return ABIArgInfo::getIndirect(0, false);
947   }
948 
949   // Compute the byval alignment.
950   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
951   unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
952   if (StackAlign == 0)
953     return ABIArgInfo::getIndirect(4, /*ByVal=*/true);
954 
955   // If the stack alignment is less than the type alignment, realign the
956   // argument.
957   bool Realign = TypeAlign > StackAlign;
958   return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
959 }
960 
classify(QualType Ty) const961 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
962   const Type *T = isSingleElementStruct(Ty, getContext());
963   if (!T)
964     T = Ty.getTypePtr();
965 
966   if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
967     BuiltinType::Kind K = BT->getKind();
968     if (K == BuiltinType::Float || K == BuiltinType::Double)
969       return Float;
970   }
971   return Integer;
972 }
973 
// Decide whether Ty should be passed in integer registers, consuming the
// required registers from State.FreeRegs on success. NeedsPadding is set when
// a fastcall/vectorcall argument consumed registers but is ineligible, yet
// registers remain free (the caller then emits register padding).
bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  // Floats never travel in integer registers.
  Class C = classify(Ty);
  if (C == Float)
    return false;

  // Number of 32-bit registers needed to hold the value.
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  // Not enough registers left: zero the pool so no later argument takes one.
  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  // Claim the registers before the convention-specific eligibility checks.
  State.FreeRegs -= SizeInRegs;

  // fastcall/vectorcall restrict register arguments to 32-bit integral,
  // pointer, and reference types.
  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    // Ineligible type but registers remain: request padding.
    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}
1016 
// Classify how an argument of type Ty is passed under the x86-32 ABI,
// consuming integer/SSE registers from State as needed.
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  // A transparent union is passed as its first field.
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogenous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      // Enough SSE registers: pass directly (scalars/vectors) or by
      // expanding the aggregate into its elements.
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // If the whole aggregate fits in the remaining registers, pass it as a
    // struct of i32s in registers.
    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    // MMX-sized vector types are passed as i64.
    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }


  // Treat an enum as its underlying integer type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Scalars: extend small promotable integers, otherwise pass directly —
  // in a register when one was claimed above.
  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
1123 
// Compute ABI classification for the return value and every argument of FI,
// threading the register budget through a single CCState.
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // Seed the register-argument budget from the calling convention.
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs;  // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  // Classify arguments left-to-right; register state accumulates in State.
  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
1162 
1163 void
addFieldToArgStruct(SmallVector<llvm::Type *,6> & FrameFields,unsigned & StackOffset,ABIArgInfo & Info,QualType Type) const1164 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1165                                    unsigned &StackOffset,
1166                                    ABIArgInfo &Info, QualType Type) const {
1167   assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
1168   Info = ABIArgInfo::getInAlloca(FrameFields.size());
1169   FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1170   StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();
1171 
1172   // Insert padding bytes to respect alignment.  For x86_32, each argument is 4
1173   // byte aligned.
1174   if (StackOffset % 4U) {
1175     unsigned OldOffset = StackOffset;
1176     StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
1177     unsigned NumBytes = StackOffset - OldOffset;
1178     assert(NumBytes);
1179     llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1180     Ty = llvm::ArrayType::get(Ty, NumBytes);
1181     FrameFields.push_back(Ty);
1182   }
1183 }
1184 
isArgInAlloca(const ABIArgInfo & Info)1185 static bool isArgInAlloca(const ABIArgInfo &Info) {
1186   // Leave ignored and inreg arguments alone.
1187   switch (Info.getKind()) {
1188   case ABIArgInfo::InAlloca:
1189     return true;
1190   case ABIArgInfo::Indirect:
1191     assert(Info.getIndirectByVal());
1192     return true;
1193   case ABIArgInfo::Ignore:
1194     return false;
1195   case ABIArgInfo::Direct:
1196   case ABIArgInfo::Extend:
1197   case ABIArgInfo::Expand:
1198     if (Info.getInReg())
1199       return false;
1200     return true;
1201   }
1202   llvm_unreachable("invalid enum");
1203 }
1204 
// Rewrite every memory-bound argument of FI into a single packed inalloca
// struct, assigning each its field index and byte offset.
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // Running byte offset of the next field within the packed struct.
  unsigned StackOffset = 0;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  // Fields sit at exact byte offsets, so the struct is packed.
  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}
1245 
// Emit IR for va_arg on x86-32: load the cursor from the va_list, align it
// if needed, compute the typed result pointer, and store back the advanced
// cursor.
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // The va_list is treated as a simple i8* cursor into the argument area.
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  // The result is the (aligned) cursor, viewed as a pointer to Ty.
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance past this argument (size in bytes rounded up to its alignment)
  // and store the new cursor back into the va_list.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
1285 
isStructReturnInRegABI(const llvm::Triple & Triple,const CodeGenOptions & Opts)1286 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1287     const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1288   assert(Triple.getArch() == llvm::Triple::x86);
1289 
1290   switch (Opts.getStructReturnConvention()) {
1291   case CodeGenOptions::SRCK_Default:
1292     break;
1293   case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
1294     return false;
1295   case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
1296     return true;
1297   }
1298 
1299   if (Triple.isOSDarwin())
1300     return true;
1301 
1302   switch (Triple.getOS()) {
1303   case llvm::Triple::DragonFly:
1304   case llvm::Triple::FreeBSD:
1305   case llvm::Triple::OpenBSD:
1306   case llvm::Triple::Bitrig:
1307   case llvm::Triple::Win32:
1308     return true;
1309   default:
1310     return false;
1311   }
1312 }
1313 
SetTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM) const1314 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
1315                                                   llvm::GlobalValue *GV,
1316                                             CodeGen::CodeGenModule &CGM) const {
1317   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
1318     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1319       // Get the LLVM function.
1320       llvm::Function *Fn = cast<llvm::Function>(GV);
1321 
1322       // Now add the 'alignstack' attribute with a value of 16.
1323       llvm::AttrBuilder B;
1324       B.addStackAlignmentAttr(16);
1325       Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1326                       llvm::AttributeSet::get(CGM.getLLVMContext(),
1327                                               llvm::AttributeSet::FunctionIndex,
1328                                               B));
1329     }
1330   }
1331 }
1332 
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const1333 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1334                                                CodeGen::CodeGenFunction &CGF,
1335                                                llvm::Value *Address) const {
1336   CodeGen::CGBuilderTy &Builder = CGF.Builder;
1337 
1338   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1339 
1340   // 0-7 are the eight integer registers;  the order is different
1341   //   on Darwin (for EH), but the range is the same.
1342   // 8 is %eip.
1343   AssignToArrayRange(Builder, Address, Four8, 0, 8);
1344 
1345   if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1346     // 12-16 are st(0..4).  Not sure why we stop at 4.
1347     // These have size 16, which is sizeof(long double) on
1348     // platforms with 8-byte alignment for that type.
1349     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1350     AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1351 
1352   } else {
1353     // 9 is %eflags, which doesn't get a size on Darwin for some
1354     // reason.
1355     Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
1356 
1357     // 11-16 are st(0..5).  Not sure why we stop at 5.
1358     // These have size 12, which is sizeof(long double) on
1359     // platforms with 4-byte alignment for that type.
1360     llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1361     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1362   }
1363 
1364   return false;
1365 }
1366 
1367 //===----------------------------------------------------------------------===//
1368 // X86-64 ABI Implementation
1369 //===----------------------------------------------------------------------===//
1370 
1371 
1372 namespace {
1373 /// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  /// Register classes produced by the AMD64 ABI classification algorithm
  /// (System V AMD64 ABI, section 3.2.3).
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object.  Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  /// Pick an LLVM IR type used to pass a value classified as a byte vector.
  llvm::Type *GetByteVectorType(QualType Ty) const;
  /// Pick the IR type for an SSE-classified eightbyte at the given offset.
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  /// Pick the IR type for an INTEGER-classified eightbyte at the given offset.
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Give a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  // neededInt/neededSSE are set to the number of integer/SSE registers the
  // classified argument consumes.
  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers.  In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// Whether the target supports AVX (set by our creator).
  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  /// Return true when a value of this type is passed directly as a vector
  /// wider than 128 bits.
  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
1507 
1508 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  /// Classify a single argument (or, when IsReturnType is true, the return
  /// value), consuming SSE registers from FreeSSERegs as appropriate.
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
                      bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
};
1533 
/// X86_64TargetCodeGenInfo - TargetCodeGenInfo for the System V x86-64 ABI.
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX; // True when 256-bit AVX vectors are available.
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {}

  // Covariant accessor: the ABIInfo installed by the constructor is always an
  // X86_64ABIInfo, so this downcast is safe.
  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7; // DWARF register 7 is %rsp on x86-64.
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    // Every covered register is 8 bytes wide.
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior.  However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  // Byte sequence recognized by UBSan in front of instrumented functions: a
  // two-byte short jump over the data, then the magic bytes 'F','T'.
  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x0a << 8) |  //           .+0x0c
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  // Default SIMD alignment for OpenMP: YMM width with AVX, XMM otherwise.
  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};
1602 
qualifyWindowsLibrary(llvm::StringRef Lib)1603 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1604   // If the argument does not end in .lib, automatically add the suffix. This
1605   // matches the behavior of MSVC.
1606   std::string ArgStr = Lib;
1607   if (!Lib.endswith_lower(".lib"))
1608     ArgStr += ".lib";
1609   return ArgStr;
1610 }
1611 
/// WinX86_32TargetCodeGenInfo - x86-32 Windows targets: generic X86_32
/// lowering plus MSVC-style embedded linker directives.
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  // The d/p/w flags and RegParms are forwarded verbatim to
  // X86_32TargetCodeGenInfo; their meaning is defined by that constructor
  // (not visible in this file region).
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
        bool d, bool p, bool w, unsigned RegParms)
    : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  // Translate a dependent-library request into a "/DEFAULTLIB:<name>.lib"
  // linker directive.
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  // Translate a detect-mismatch request into a
  // /FAILIFMISMATCH:"Name=Value" linker directive.
  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
1630 
/// WinX86_64TargetCodeGenInfo - x86-64 Windows targets, backed by
/// WinX86_64ABIInfo, with MSVC-style embedded linker directives.
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX; // True when 256-bit AVX vectors are available.
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7; // DWARF register 7 is %rsp on x86-64.
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    // Every covered register is 8 bytes wide.
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  // Translate a dependent-library request into a "/DEFAULTLIB:<name>.lib"
  // linker directive.
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  // Translate a detect-mismatch request into a
  // /FAILIFMISMATCH:"Name=Value" linker directive.
  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }

  // Default SIMD alignment for OpenMP: YMM width with AVX, XMM otherwise.
  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};
1667 
1668 }
1669 
postMerge(unsigned AggregateSize,Class & Lo,Class & Hi) const1670 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
1671                               Class &Hi) const {
1672   // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1673   //
1674   // (a) If one of the classes is Memory, the whole argument is passed in
1675   //     memory.
1676   //
1677   // (b) If X87UP is not preceded by X87, the whole argument is passed in
1678   //     memory.
1679   //
1680   // (c) If the size of the aggregate exceeds two eightbytes and the first
1681   //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
1682   //     argument is passed in memory. NOTE: This is necessary to keep the
1683   //     ABI working for processors that don't support the __m256 type.
1684   //
1685   // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1686   //
1687   // Some of these are enforced by the merging logic.  Others can arise
1688   // only with unions; for example:
1689   //   union { _Complex double; unsigned; }
1690   //
1691   // Note that clauses (b) and (c) were added in 0.98.
1692   //
1693   if (Hi == Memory)
1694     Lo = Memory;
1695   if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1696     Lo = Memory;
1697   if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1698     Lo = Memory;
1699   if (Hi == SSEUp && Lo != SSE)
1700     Hi = SSE;
1701 }
1702 
merge(Class Accum,Class Field)1703 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
1704   // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1705   // classified recursively so that always two fields are
1706   // considered. The resulting class is calculated according to
1707   // the classes of the fields in the eightbyte:
1708   //
1709   // (a) If both classes are equal, this is the resulting class.
1710   //
1711   // (b) If one of the classes is NO_CLASS, the resulting class is
1712   // the other class.
1713   //
1714   // (c) If one of the classes is MEMORY, the result is the MEMORY
1715   // class.
1716   //
1717   // (d) If one of the classes is INTEGER, the result is the
1718   // INTEGER.
1719   //
1720   // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1721   // MEMORY is used as class.
1722   //
1723   // (f) Otherwise class SSE is used.
1724 
1725   // Accum should never be memory (we should have returned) or
1726   // ComplexX87 (because this cannot be passed in a structure).
1727   assert((Accum != Memory && Accum != ComplexX87) &&
1728          "Invalid accumulated classification during merge.");
1729   if (Accum == Field || Field == NoClass)
1730     return Accum;
1731   if (Field == Memory)
1732     return Memory;
1733   if (Accum == NoClass)
1734     return Field;
1735   if (Accum == Integer || Field == Integer)
1736     return Integer;
1737   if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1738       Accum == X87 || Accum == X87Up)
1739     return Memory;
1740   return SSE;
1741 }
1742 
classify(QualType Ty,uint64_t OffsetBase,Class & Lo,Class & Hi,bool isNamedArg) const1743 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
1744                              Class &Lo, Class &Hi, bool isNamedArg) const {
1745   // FIXME: This code can be simplified by introducing a simple value class for
1746   // Class pairs with appropriate constructor methods for the various
1747   // situations.
1748 
1749   // FIXME: Some of the split computations are wrong; unaligned vectors
1750   // shouldn't be passed in registers for example, so there is no chance they
1751   // can straddle an eightbyte. Verify & simplify.
1752 
1753   Lo = Hi = NoClass;
1754 
1755   Class &Current = OffsetBase < 64 ? Lo : Hi;
1756   Current = Memory;
1757 
1758   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1759     BuiltinType::Kind k = BT->getKind();
1760 
1761     if (k == BuiltinType::Void) {
1762       Current = NoClass;
1763     } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1764       Lo = Integer;
1765       Hi = Integer;
1766     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1767       Current = Integer;
1768     } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
1769                (k == BuiltinType::LongDouble &&
1770                 getTarget().getTriple().isOSNaCl())) {
1771       Current = SSE;
1772     } else if (k == BuiltinType::LongDouble) {
1773       Lo = X87;
1774       Hi = X87Up;
1775     }
1776     // FIXME: _Decimal32 and _Decimal64 are SSE.
1777     // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
1778     return;
1779   }
1780 
1781   if (const EnumType *ET = Ty->getAs<EnumType>()) {
1782     // Classify the underlying integer type.
1783     classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1784     return;
1785   }
1786 
1787   if (Ty->hasPointerRepresentation()) {
1788     Current = Integer;
1789     return;
1790   }
1791 
1792   if (Ty->isMemberPointerType()) {
1793     if (Ty->isMemberFunctionPointerType()) {
1794       if (Has64BitPointers) {
1795         // If Has64BitPointers, this is an {i64, i64}, so classify both
1796         // Lo and Hi now.
1797         Lo = Hi = Integer;
1798       } else {
1799         // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
1800         // straddles an eightbyte boundary, Hi should be classified as well.
1801         uint64_t EB_FuncPtr = (OffsetBase) / 64;
1802         uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
1803         if (EB_FuncPtr != EB_ThisAdj) {
1804           Lo = Hi = Integer;
1805         } else {
1806           Current = Integer;
1807         }
1808       }
1809     } else {
1810       Current = Integer;
1811     }
1812     return;
1813   }
1814 
1815   if (const VectorType *VT = Ty->getAs<VectorType>()) {
1816     uint64_t Size = getContext().getTypeSize(VT);
1817     if (Size == 32) {
1818       // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1819       // float> as integer.
1820       Current = Integer;
1821 
1822       // If this type crosses an eightbyte boundary, it should be
1823       // split.
1824       uint64_t EB_Real = (OffsetBase) / 64;
1825       uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1826       if (EB_Real != EB_Imag)
1827         Hi = Lo;
1828     } else if (Size == 64) {
1829       // gcc passes <1 x double> in memory. :(
1830       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1831         return;
1832 
1833       // gcc passes <1 x long long> as INTEGER.
1834       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
1835           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1836           VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1837           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
1838         Current = Integer;
1839       else
1840         Current = SSE;
1841 
1842       // If this type crosses an eightbyte boundary, it should be
1843       // split.
1844       if (OffsetBase && OffsetBase != 64)
1845         Hi = Lo;
1846     } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
1847       // Arguments of 256-bits are split into four eightbyte chunks. The
1848       // least significant one belongs to class SSE and all the others to class
1849       // SSEUP. The original Lo and Hi design considers that types can't be
1850       // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
1851       // This design isn't correct for 256-bits, but since there're no cases
1852       // where the upper parts would need to be inspected, avoid adding
1853       // complexity and just consider Hi to match the 64-256 part.
1854       //
1855       // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
1856       // registers if they are "named", i.e. not part of the "..." of a
1857       // variadic function.
1858       Lo = SSE;
1859       Hi = SSEUp;
1860     }
1861     return;
1862   }
1863 
1864   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1865     QualType ET = getContext().getCanonicalType(CT->getElementType());
1866 
1867     uint64_t Size = getContext().getTypeSize(Ty);
1868     if (ET->isIntegralOrEnumerationType()) {
1869       if (Size <= 64)
1870         Current = Integer;
1871       else if (Size <= 128)
1872         Lo = Hi = Integer;
1873     } else if (ET == getContext().FloatTy)
1874       Current = SSE;
1875     else if (ET == getContext().DoubleTy ||
1876              (ET == getContext().LongDoubleTy &&
1877               getTarget().getTriple().isOSNaCl()))
1878       Lo = Hi = SSE;
1879     else if (ET == getContext().LongDoubleTy)
1880       Current = ComplexX87;
1881 
1882     // If this complex type crosses an eightbyte boundary then it
1883     // should be split.
1884     uint64_t EB_Real = (OffsetBase) / 64;
1885     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1886     if (Hi == NoClass && EB_Real != EB_Imag)
1887       Hi = Lo;
1888 
1889     return;
1890   }
1891 
1892   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1893     // Arrays are treated like structures.
1894 
1895     uint64_t Size = getContext().getTypeSize(Ty);
1896 
1897     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1898     // than four eightbytes, ..., it has class MEMORY.
1899     if (Size > 256)
1900       return;
1901 
1902     // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1903     // fields, it has class MEMORY.
1904     //
1905     // Only need to check alignment of array base.
1906     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1907       return;
1908 
1909     // Otherwise implement simplified merge. We could be smarter about
1910     // this, but it isn't worth it and would be harder to verify.
1911     Current = NoClass;
1912     uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1913     uint64_t ArraySize = AT->getSize().getZExtValue();
1914 
1915     // The only case a 256-bit wide vector could be used is when the array
1916     // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1917     // to work for sizes wider than 128, early check and fallback to memory.
1918     if (Size > 128 && EltSize != 256)
1919       return;
1920 
1921     for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1922       Class FieldLo, FieldHi;
1923       classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
1924       Lo = merge(Lo, FieldLo);
1925       Hi = merge(Hi, FieldHi);
1926       if (Lo == Memory || Hi == Memory)
1927         break;
1928     }
1929 
1930     postMerge(Size, Lo, Hi);
1931     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1932     return;
1933   }
1934 
1935   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1936     uint64_t Size = getContext().getTypeSize(Ty);
1937 
1938     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1939     // than four eightbytes, ..., it has class MEMORY.
1940     if (Size > 256)
1941       return;
1942 
1943     // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1944     // copy constructor or a non-trivial destructor, it is passed by invisible
1945     // reference.
1946     if (getRecordArgABI(RT, getCXXABI()))
1947       return;
1948 
1949     const RecordDecl *RD = RT->getDecl();
1950 
1951     // Assume variable sized types are passed in memory.
1952     if (RD->hasFlexibleArrayMember())
1953       return;
1954 
1955     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1956 
1957     // Reset Lo class, this will be recomputed.
1958     Current = NoClass;
1959 
1960     // If this is a C++ record, classify the bases first.
1961     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1962       for (const auto &I : CXXRD->bases()) {
1963         assert(!I.isVirtual() && !I.getType()->isDependentType() &&
1964                "Unexpected base class!");
1965         const CXXRecordDecl *Base =
1966           cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
1967 
1968         // Classify this field.
1969         //
1970         // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1971         // single eightbyte, each is classified separately. Each eightbyte gets
1972         // initialized to class NO_CLASS.
1973         Class FieldLo, FieldHi;
1974         uint64_t Offset =
1975           OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
1976         classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
1977         Lo = merge(Lo, FieldLo);
1978         Hi = merge(Hi, FieldHi);
1979         if (Lo == Memory || Hi == Memory)
1980           break;
1981       }
1982     }
1983 
1984     // Classify the fields one at a time, merging the results.
1985     unsigned idx = 0;
1986     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1987            i != e; ++i, ++idx) {
1988       uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1989       bool BitField = i->isBitField();
1990 
1991       // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1992       // four eightbytes, or it contains unaligned fields, it has class MEMORY.
1993       //
1994       // The only case a 256-bit wide vector could be used is when the struct
1995       // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1996       // to work for sizes wider than 128, early check and fallback to memory.
1997       //
1998       if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
1999         Lo = Memory;
2000         return;
2001       }
2002       // Note, skip this test for bit-fields, see below.
2003       if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2004         Lo = Memory;
2005         return;
2006       }
2007 
2008       // Classify this field.
2009       //
2010       // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2011       // exceeds a single eightbyte, each is classified
2012       // separately. Each eightbyte gets initialized to class
2013       // NO_CLASS.
2014       Class FieldLo, FieldHi;
2015 
2016       // Bit-fields require special handling, they do not force the
2017       // structure to be passed in memory even if unaligned, and
2018       // therefore they can straddle an eightbyte.
2019       if (BitField) {
2020         // Ignore padding bit-fields.
2021         if (i->isUnnamedBitfield())
2022           continue;
2023 
2024         uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2025         uint64_t Size = i->getBitWidthValue(getContext());
2026 
2027         uint64_t EB_Lo = Offset / 64;
2028         uint64_t EB_Hi = (Offset + Size - 1) / 64;
2029 
2030         if (EB_Lo) {
2031           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2032           FieldLo = NoClass;
2033           FieldHi = Integer;
2034         } else {
2035           FieldLo = Integer;
2036           FieldHi = EB_Hi ? Integer : NoClass;
2037         }
2038       } else
2039         classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2040       Lo = merge(Lo, FieldLo);
2041       Hi = merge(Hi, FieldHi);
2042       if (Lo == Memory || Hi == Memory)
2043         break;
2044     }
2045 
2046     postMerge(Size, Lo, Hi);
2047   }
2048 }
2049 
getIndirectReturnResult(QualType Ty) const2050 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2051   // If this is a scalar LLVM value then assume LLVM will pass it in the right
2052   // place naturally.
2053   if (!isAggregateTypeForABI(Ty)) {
2054     // Treat an enum type as its underlying type.
2055     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2056       Ty = EnumTy->getDecl()->getIntegerType();
2057 
2058     return (Ty->isPromotableIntegerType() ?
2059             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2060   }
2061 
2062   return ABIArgInfo::getIndirect(0);
2063 }
2064 
IsIllegalVectorType(QualType Ty) const2065 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2066   if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2067     uint64_t Size = getContext().getTypeSize(VecTy);
2068     unsigned LargestVector = HasAVX ? 256 : 128;
2069     if (Size <= 64 || Size > LargestVector)
2070       return true;
2071   }
2072 
2073   return false;
2074 }
2075 
getIndirectResult(QualType Ty,unsigned freeIntRegs) const2076 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2077                                             unsigned freeIntRegs) const {
2078   // If this is a scalar LLVM value then assume LLVM will pass it in the right
2079   // place naturally.
2080   //
2081   // This assumption is optimistic, as there could be free registers available
2082   // when we need to pass this argument in memory, and LLVM could try to pass
2083   // the argument in the free register. This does not seem to happen currently,
2084   // but this code would be much safer if we could mark the argument with
2085   // 'onstack'. See PR12193.
2086   if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2087     // Treat an enum type as its underlying type.
2088     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2089       Ty = EnumTy->getDecl()->getIntegerType();
2090 
2091     return (Ty->isPromotableIntegerType() ?
2092             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2093   }
2094 
2095   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2096     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2097 
2098   // Compute the byval alignment. We specify the alignment of the byval in all
2099   // cases so that the mid-level optimizer knows the alignment of the byval.
2100   unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2101 
2102   // Attempt to avoid passing indirect results using byval when possible. This
2103   // is important for good codegen.
2104   //
2105   // We do this by coercing the value into a scalar type which the backend can
2106   // handle naturally (i.e., without using byval).
2107   //
2108   // For simplicity, we currently only do this when we have exhausted all of the
2109   // free integer registers. Doing this when there are free integer registers
2110   // would require more care, as we would have to ensure that the coerced value
2111   // did not claim the unused register. That would require either reording the
2112   // arguments to the function (so that any subsequent inreg values came first),
2113   // or only doing this optimization when there were no following arguments that
2114   // might be inreg.
2115   //
2116   // We currently expect it to be rare (particularly in well written code) for
2117   // arguments to be passed on the stack when there are still free integer
2118   // registers available (this would typically imply large structs being passed
2119   // by value), so this seems like a fair tradeoff for now.
2120   //
2121   // We can revisit this if the backend grows support for 'onstack' parameter
2122   // attributes. See PR12193.
2123   if (freeIntRegs == 0) {
2124     uint64_t Size = getContext().getTypeSize(Ty);
2125 
2126     // If this type fits in an eightbyte, coerce it into the matching integral
2127     // type, which will end up on the stack (with alignment 8).
2128     if (Align == 8 && Size <= 64)
2129       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2130                                                           Size));
2131   }
2132 
2133   return ABIArgInfo::getIndirect(Align);
2134 }
2135 
2136 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2137 /// register. Pick an LLVM IR type that will be passed as a vector register.
GetByteVectorType(QualType Ty) const2138 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2139   // Wrapper structs/arrays that only contain vectors are passed just like
2140   // vectors; strip them off if present.
2141   if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2142     Ty = QualType(InnerTy, 0);
2143 
2144   llvm::Type *IRType = CGT.ConvertType(Ty);
2145 
2146   // If the preferred type is a 16-byte vector, prefer to pass it.
2147   if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
2148     llvm::Type *EltTy = VT->getElementType();
2149     unsigned BitWidth = VT->getBitWidth();
2150     if ((BitWidth >= 128 && BitWidth <= 256) &&
2151         (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
2152          EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
2153          EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
2154          EltTy->isIntegerTy(128)))
2155       return VT;
2156   }
2157 
2158   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
2159 }
2160 
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding.  The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bits being queried are off the end of the type, there is no user
  // data hiding here.  This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done..
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      // Clip the query range to this element's extent before recursing.
      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        // Clip the query range to this base's extent before recursing.
        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.  Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      // Clip the query range to this field's extent before recursing.
      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  // Any other type (scalar, vector, ...) that extends into the range is
  // conservatively assumed to contain user data there.
  return false;
}
2244 
2245 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2246 /// float member at the specified offset.  For example, {int,{float}} has a
2247 /// float at offset 4.  It is conservatively correct for this routine to return
2248 /// false.
ContainsFloatAtOffset(llvm::Type * IRType,unsigned IROffset,const llvm::DataLayout & TD)2249 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2250                                   const llvm::DataLayout &TD) {
2251   // Base case if we find a float.
2252   if (IROffset == 0 && IRType->isFloatTy())
2253     return true;
2254 
2255   // If this is a struct, recurse into the field at the specified offset.
2256   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2257     const llvm::StructLayout *SL = TD.getStructLayout(STy);
2258     unsigned Elt = SL->getElementContainingOffset(IROffset);
2259     IROffset -= SL->getElementOffset(Elt);
2260     return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2261   }
2262 
2263   // If this is an array, recurse into the field at the specified offset.
2264   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2265     llvm::Type *EltTy = ATy->getElementType();
2266     unsigned EltSize = TD.getTypeAllocSize(EltTy);
2267     IROffset -= IROffset/EltSize*EltSize;
2268     return ContainsFloatAtOffset(EltTy, IROffset, TD);
2269   }
2270 
2271   return false;
2272 }
2273 
2274 
/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
///
/// \param IRType       LLVM IR type for (part of) the argument being lowered.
/// \param IROffset     Byte offset into \p IRType of the eightbyte in question.
/// \param SourceTy     Source-level type of the entire argument.
/// \param SourceOffset Byte offset into \p SourceTy being processed (0 or 8).
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float. We
  // pass as float if the last 4 bytes is just padding.  This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  // Otherwise conservatively pass the whole eightbyte as a double; the
  // backend places it in the low 8 bytes of an XMM register.
  return llvm::Type::getDoubleTy(getVMContext());
}
2296 
2297 
/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR.  This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct.  This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type.  IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references.  PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument.  SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it.  See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding.  This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int.  We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      // A non-64-bit pointer (e.g. the x32 ABI) occupies 4 bytes, so treat it
      // as a 32-bit quantity for the tail-padding check below.
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      // Usable only if everything from the end of this integer to the end of
      // the eightbyte is padding with no user data.
      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    // Guard against an offset pointing past the struct (can happen with
    // trailing padding); in that case fall through to the generic integer.
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    // For arrays, recurse into the element containing the offset, rebasing
    // the offset to be relative to that element's start.
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
2373 
2374 
2375 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2376 /// be used as elements of a two register pair to pass or return, return a
2377 /// first class aggregate to represent them.  For example, if the low part of
2378 /// a by-value argument should be passed as i32* and the high part as float,
2379 /// return {i32*, float}.
2380 static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type * Lo,llvm::Type * Hi,const llvm::DataLayout & TD)2381 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2382                            const llvm::DataLayout &TD) {
2383   // In order to correctly satisfy the ABI, we need to the high part to start
2384   // at offset 8.  If the high and low parts we inferred are both 4-byte types
2385   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2386   // the second element at offset 8.  Check for this:
2387   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2388   unsigned HiAlign = TD.getABITypeAlignment(Hi);
2389   unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
2390   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2391 
2392   // To handle this, we have to increase the size of the low part so that the
2393   // second element will start at an 8 byte offset.  We can't increase the size
2394   // of the second element because it might make us access off the end of the
2395   // struct.
2396   if (HiStart != 8) {
2397     // There are only two sorts of types the ABI generation code can produce for
2398     // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2399     // Promote these to a larger type.
2400     if (Lo->isFloatTy())
2401       Lo = llvm::Type::getDoubleTy(Lo->getContext());
2402     else {
2403       assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2404       Lo = llvm::Type::getInt64Ty(Lo->getContext());
2405     }
2406   }
2407 
2408   llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
2409 
2410 
2411   // Verify that the second element is at an 8-byte offset.
2412   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2413          "Invalid x86-64 argument pair!");
2414   return Result;
2415 }
2416 
ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  // First pick an IR type for the low eightbyte (or decide to return
  // indirectly / ignore entirely) based on the low classification.
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    // Neither eightbyte carries data: nothing to return at all.
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    nullptr);
    break;
  }

  // Now pick an IR type for the high eightbyte, if any.
  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk if the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    // Replace ResType entirely: the whole value is one wide vector.
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part.  It is
  // known to pass in the high eightbyte of the result.  We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
2545 
/// Classify a single argument for the x86-64 ABI, returning its ABIArgInfo
/// and reporting (via \p neededInt / \p neededSSE) how many GP and SSE
/// registers the argument consumes.  \p freeIntRegs is the number of GP
/// registers still available (used when falling back to indirect passing);
/// \p isNamedArg is false for arguments in the variadic portion of a call.
ABIArgInfo X86_64ABIInfo::classifyArgumentType(
  QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
  bool isNamedArg)
  const
{
  // A transparent union is classified as its first field.
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  // Pick an IR type for the low eightbyte based on its classification.
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    // Neither eightbyte carries data: the argument takes no registers.
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    // A C++ record passed indirectly still consumes one GP register for the
    // hidden pointer; report that to the caller's register accounting.
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }

    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  // Now pick an IR type for the high eightbyte, if any.
  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.  This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    // Replace ResType entirely: the whole value is one wide vector.
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part.  It is
  // known to pass in the high eightbyte of the result.  We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
2674 
computeInfo(CGFunctionInfo & FI) const2675 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2676 
2677   if (!getCXXABI().classifyReturnType(FI))
2678     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2679 
2680   // Keep track of the number of assigned registers.
2681   unsigned freeIntRegs = 6, freeSSERegs = 8;
2682 
2683   // If the return value is indirect, then the hidden argument is consuming one
2684   // integer register.
2685   if (FI.getReturnInfo().isIndirect())
2686     --freeIntRegs;
2687 
2688   // The chain argument effectively gives us another free register.
2689   if (FI.isChainCall())
2690     ++freeIntRegs;
2691 
2692   unsigned NumRequiredArgs = FI.getNumRequiredArgs();
2693   // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2694   // get assigned (in left-to-right order) for passing as follows...
2695   unsigned ArgNo = 0;
2696   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2697        it != ie; ++it, ++ArgNo) {
2698     bool IsNamedArg = ArgNo < NumRequiredArgs;
2699 
2700     unsigned neededInt, neededSSE;
2701     it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2702                                     neededSSE, IsNamedArg);
2703 
2704     // AMD64-ABI 3.2.3p3: If there are no registers available for any
2705     // eightbyte of an argument, the whole argument is passed on the
2706     // stack. If registers have already been assigned for some
2707     // eightbytes of such an argument, the assignments get reverted.
2708     if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
2709       freeIntRegs -= neededInt;
2710       freeSSERegs -= neededSSE;
2711     } else {
2712       it->info = getIndirectResult(it->type, freeIntRegs);
2713     }
2714   }
2715 }
2716 
/// Emit the va_arg path that fetches \p Ty from the va_list's
/// overflow_arg_area (the stack), advancing the area past the value.
/// Returns a pointer to the fetched value, typed as a pointer to \p Ty's
/// memory representation.
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  // Field 2 of the va_list struct is the i8* overflow_arg_area.
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    // Emitted as: add Align-1, then mask off the low bits via ptrtoint/and/
    // inttoptr (assumes Align is a power of two).
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  // Size in bytes (rounded up from bits), then rounded up to a multiple of 8.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
2765 
/// Emit x86-64 va_arg for \p Ty: decide between the register save area and
/// the overflow (stack) area at runtime, and return the address of the
/// fetched value.
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = CGF.getContext().getCanonicalType(Ty);
  // Variadic arguments are by definition unnamed; freeIntRegs=0 is fine here
  // since the classification only uses it for the indirect fallback.
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg*/false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space).

  // Build the runtime "fits in registers" condition from whichever of the
  // gp/fp offsets are relevant for this type.
  llvm::Value *InRegs = nullptr;
  llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
  llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // Mixed GP/SSE value: gather both eightbytes into a temporary that has
    // the coerced two-element struct layout, then reinterpret as Ty.
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    // Exactly one of the two halves is FP; that one comes from the FP save
    // area and the other from the GP save area.
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    // Pure GP value: address it directly in the register save area.
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));

    // Copy to a temporary if necessary to ensure the appropriate alignment.
    std::pair<CharUnits, CharUnits> SizeAlign =
        CGF.getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = SizeAlign.first.getQuantity();
    unsigned TyAlign = SizeAlign.second.getQuantity();
    // The GP save area only guarantees 8-byte alignment.
    if (TyAlign > 8) {
      llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
      RegAddr = Tmp;
    }
  } else if (neededSSE == 1) {
    // Single SSE eightbyte: address it directly in the FP save area.
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, we need to collect the two eightbytes together.
    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
    llvm::Type *DoubleTy = CGF.DoubleTy;
    llvm::Type *DblPtrTy =
      llvm::PointerType::getUnqual(DoubleTy);
    llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
    llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  // Merge the register-path and memory-path addresses with a phi.
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
                                                 "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}
2934 
// Classify a single argument or return value under the Windows x64
// (and vectorcall) ABI.  FreeSSERegs is decremented in place when a
// homogeneous vector aggregate consumes SSE registers; IsReturnType
// selects the slightly different return-value rules.
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType) const {

  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  // Treat an enum type as its underlying integer type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      // C++ records with non-trivial copy/destruction semantics must be
      // passed indirectly, as the C++ ABI dictates.
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // A flexible array member makes the size open-ended; pass by reference.
    if (RT->getDecl()->hasFlexibleArrayMember())
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // FIXME: mingw-w64-gcc emits 128-bit struct as i128
    if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Width));
  }

  // vectorcall adds the concept of a homogenous vector aggregate, similar to
  // other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (FreeSSERegs >= NumElts) {
      FreeSSERegs -= NumElts;
      // HVA returns and plain vector/builtin values go directly; aggregate
      // HVA arguments are expanded into their elements.
      if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    // Not enough SSE registers left for the whole HVA: pass by reference.
    return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
  }


  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  // Bool type is always extended to the ABI, other builtin types are not
  // extended.
  const BuiltinType *BT = Ty->getAs<BuiltinType>();
  if (BT && BT->getKind() == BuiltinType::Bool)
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect();
}
3005 
computeInfo(CGFunctionInfo & FI) const3006 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3007   bool IsVectorCall =
3008       FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3009 
3010   // We can use up to 4 SSE return registers with vectorcall.
3011   unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3012   if (!getCXXABI().classifyReturnType(FI))
3013     FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3014 
3015   // We can use up to 6 SSE register parameters with vectorcall.
3016   FreeSSERegs = IsVectorCall ? 6 : 0;
3017   for (auto &I : FI.arguments())
3018     I.info = classify(I.type, FreeSSERegs, false);
3019 }
3020 
EmitVAArg(llvm::Value * VAListAddr,QualType Ty,CodeGenFunction & CGF) const3021 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3022                                       CodeGenFunction &CGF) const {
3023   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3024 
3025   CGBuilderTy &Builder = CGF.Builder;
3026   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
3027                                                        "ap");
3028   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3029   llvm::Type *PTy =
3030     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3031   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3032 
3033   uint64_t Offset =
3034     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
3035   llvm::Value *NextAddr =
3036     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3037                       "ap.next");
3038   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3039 
3040   return AddrTyped;
3041 }
3042 
3043 namespace {
3044 
/// NaClX86_64ABIInfo - ABI information for Native Client on x86-64.
/// Delegates to PNaClABIInfo for functions using the pnaclcall
/// convention and to the native X86_64ABIInfo for everything else.
class NaClX86_64ABIInfo : public ABIInfo {
 public:
  NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
 private:
  PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
  X86_64ABIInfo NInfo; // Used for everything else.
};
3056 
/// NaClX86_64TargetCodeGenInfo - Target-specific code-gen hooks for
/// Native Client on x86-64.
class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo  {
  bool HasAVX; // Whether AVX is available (doubles the natural SIMD width).
 public:
   NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
       : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {
   }
   // Default alignment for OpenMP simd: 32 bytes with AVX, else 16.
   unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
     return HasAVX ? 32 : 16;
   }
};
3067 
3068 }
3069 
computeInfo(CGFunctionInfo & FI) const3070 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3071   if (FI.getASTCallingConvention() == CC_PnaclCall)
3072     PInfo.computeInfo(FI);
3073   else
3074     NInfo.computeInfo(FI);
3075 }
3076 
// Emit a va_arg load; varargs always follow the native convention.
llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                          CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
}
3083 
3084 
3085 // PowerPC-32
3086 namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
/// Classification falls back to DefaultABIInfo; only va_arg emission is
/// specialized (see EmitVAArg below).
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
3095 
/// PPC32TargetCodeGenInfo - Target-specific code-gen hooks for 32-bit
/// PowerPC (SVR4), including DWARF EH register information.
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return 16; // Natural alignment for Altivec vectors.
  }
};
3112 
3113 }
3114 
// Emit a va_arg load for the PPC32 SVR4 ABI.  The va_list is addressed as
// raw bytes with the layout implied by the offsets below:
//   +0: i8  gpr counter, +1: i8 fpr counter, +2..3: padding,
//   +4: i8* overflow_area, +8: i8* regsave_area.
// The first 8 GPRs (4 bytes each) precede the FPRs (8 bytes each, at
// offset 32) inside the register save area.
llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return nullptr;
  }

  // 64-bit integers consume two GPRs; integers, pointers and aggregates
  // all travel via the GPR path, everything else via the FPR path.
  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt = Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
  llvm::Type *CharPtr = CGF.Int8PtrTy;
  llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  // Compute byte addresses of the two counters and the two area pointers.
  llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
  llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
  llvm::Value *FPRPtrAsInt = Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
  llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
  llvm::Value *OverflowAreaPtrAsInt = Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
  llvm::Value *OverflowAreaPtr = Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
  llvm::Value *RegsaveAreaPtrAsInt = Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
  llvm::Value *RegsaveAreaPtr = Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
  llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
  // Align GPR when TY is i64: if the counter is odd, bump it to the next
  // even register so the 64-bit value starts on an even GPR.
  if (isI64) {
    llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
    llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
    llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
    GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
  }
  llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
  llvm::Value *OverflowArea = Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
  llvm::Value *OverflowAreaAsInt = Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
  llvm::Value *RegsaveArea = Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
  llvm::Value *RegsaveAreaAsInt = Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);

  // The argument is in the register save area only while fewer than 8
  // registers of the relevant class have been consumed.
  llvm::Value *CC = Builder.CreateICmpULT(isInt ? GPR : FPR,
                                          Builder.getInt8(8), "cond");

  // Byte offset into the save area: GPR slots are 4 bytes, FPR slots 8.
  llvm::Value *RegConstant = Builder.CreateMul(isInt ? GPR : FPR,
                                               Builder.getInt8(isInt ? 4 : 8));

  llvm::Value *OurReg = Builder.CreateAdd(RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));

  // FPRs live after the 8 GPRs (8 * 4 = 32 bytes) in the save area.
  if (Ty->isFloatingType())
    OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  CGF.EmitBlock(UsingRegs);

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
  // Increase the GPR/FPR indexes.
  if (isInt) {
    GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
    Builder.CreateStore(GPR, GPRPtr);
  } else {
    FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
    Builder.CreateStore(FPR, FPRPtr);
  }
  CGF.EmitBranch(Cont);

  CGF.EmitBlock(UsingOverflow);

  // Increase the overflow area.
  llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
  OverflowAreaAsInt = Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
  Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr), OverflowAreaPtr);
  CGF.EmitBranch(Cont);

  CGF.EmitBlock(Cont);

  // Merge the register-save-area and overflow-area addresses.
  llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
  Result->addIncoming(Result1, UsingRegs);
  Result->addIncoming(Result2, UsingOverflow);

  // Aggregates are passed as a pointer to the aggregate, so what we have
  // so far is a pointer-to-pointer; load once to get the real address.
  if (Ty->isAggregateType()) {
    llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr")  ;
    return Builder.CreateLoad(AGGPtr, false, "aggr");
  }

  return Result;
}
3204 
3205 bool
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const3206 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3207                                                 llvm::Value *Address) const {
3208   // This is calculated from the LLVM and GCC tables and verified
3209   // against gcc output.  AFAIK all ABIs use the same encoding.
3210 
3211   CodeGen::CGBuilderTy &Builder = CGF.Builder;
3212 
3213   llvm::IntegerType *i8 = CGF.Int8Ty;
3214   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3215   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3216   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3217 
3218   // 0-31: r0-31, the 4-byte general-purpose registers
3219   AssignToArrayRange(Builder, Address, Four8, 0, 31);
3220 
3221   // 32-63: fp0-31, the 8-byte floating-point registers
3222   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3223 
3224   // 64-76 are various 4-byte special-purpose registers:
3225   // 64: mq
3226   // 65: lr
3227   // 66: ctr
3228   // 67: ap
3229   // 68-75 cr0-7
3230   // 76: xer
3231   AssignToArrayRange(Builder, Address, Four8, 64, 76);
3232 
3233   // 77-108: v0-31, the 16-byte vector registers
3234   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3235 
3236   // 109: vrsave
3237   // 110: vscr
3238   // 111: spe_acc
3239   // 112: spefscr
3240   // 113: sfp
3241   AssignToArrayRange(Builder, Address, Four8, 109, 113);
3242 
3243   return false;
3244 }
3245 
3246 // PowerPC-64
3247 
3248 namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
public:
  // Which flavor of the 64-bit ELF ABI is in effect.
  enum ABIKind {
    ELFv1 = 0,
    ELFv2
  };

private:
  // Width of a general-purpose register, in bits.
  static const unsigned GPRBits = 64;
  ABIKind Kind;

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
    : DefaultABIInfo(CGT), Kind(Kind) {}

  // True if the ABI requires Ty to be sign-/zero-extended to 64 bits.
  bool isPromotableTypeForABI(QualType Ty) const;
  // True if Ty requires 16-byte alignment in the parameter save area.
  bool isAlignedParamType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry.  This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception:  An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
3305 
/// PPC64_SVR4_TargetCodeGenInfo - Target-specific code-gen hooks for the
/// 64-bit PowerPC ELF (SVR4) ABI, parameterized on the ELFv1/ELFv2 kind.
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
                               PPC64_SVR4_ABIInfo::ABIKind Kind)
    : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return 16; // Natural alignment for Altivec and VSX vectors.
  }
};
3324 
/// PPC64TargetCodeGenInfo - Target-specific code-gen hooks for non-SVR4
/// 64-bit PowerPC, built on the default ABI classification.
class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return 16; // Natural alignment for Altivec vectors.
  }
};
3341 
3342 }
3343 
3344 // Return true if the ABI requires Ty to be passed sign- or zero-
3345 // extended to 64 bits.
3346 bool
isPromotableTypeForABI(QualType Ty) const3347 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
3348   // Treat an enum type as its underlying type.
3349   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3350     Ty = EnumTy->getDecl()->getIntegerType();
3351 
3352   // Promotable integer types are required to be promoted by the ABI.
3353   if (Ty->isPromotableIntegerType())
3354     return true;
3355 
3356   // In addition to the usual promotable integer types, we also need to
3357   // extend all 32-bit types, since the ABI requires promotion to 64 bits.
3358   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
3359     switch (BT->getKind()) {
3360     case BuiltinType::Int:
3361     case BuiltinType::UInt:
3362       return true;
3363     default:
3364       break;
3365     }
3366 
3367   return false;
3368 }
3369 
/// isAlignedParamType - Determine whether a type requires 16-byte
/// alignment in the parameter area.
bool
PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (Ty->isVectorType())
    return getContext().getTypeSize(Ty) == 128;

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() &&
         getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType)
    return AlignAsType->isVectorType();

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128)
    return true;

  return false;
}
3413 
/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
/// aggregate.  Base is set to the base element type, and Members is set
/// to the number of base elements.  Recurses through arrays and records;
/// the virtual hooks isHomogeneousAggregateBaseType and
/// isHomogeneousAggregateSmallEnough supply the target-specific limits.
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // An array contributes (element count) x (members of its element type).
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
             getContext().getAsConstantArrayType(FT)) {
        if (AT->getSize().getZExtValue() == 0)
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;

      // For compatibility with GCC, ignore empty bitfields in C++ mode.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;

      // Union members overlap, so a union contributes only its largest
      // member; otherwise field counts accumulate.
      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    // A record with no qualifying members is not homogeneous.
    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    // Leaf case: a scalar (or complex, which counts as two elements).
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members.  Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base)
      Base = TyPtr;

    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}
3504 
isHomogeneousAggregateBaseType(QualType Ty) const3505 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
3506   // Homogeneous aggregates for ELFv2 must have base types of float,
3507   // double, long double, or 128-bit vectors.
3508   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3509     if (BT->getKind() == BuiltinType::Float ||
3510         BT->getKind() == BuiltinType::Double ||
3511         BT->getKind() == BuiltinType::LongDouble)
3512       return true;
3513   }
3514   if (const VectorType *VT = Ty->getAs<VectorType>()) {
3515     if (getContext().getTypeSize(VT) == 128)
3516       return true;
3517   }
3518   return false;
3519 }
3520 
isHomogeneousAggregateSmallEnough(const Type * Base,uint64_t Members) const3521 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
3522     const Type *Base, uint64_t Members) const {
3523   // Vector types require one register, floating point types require one
3524   // or two registers depending on their size.
3525   uint32_t NumRegs =
3526       Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
3527 
3528   // Homogeneous Aggregates may occupy at most 8 registers.
3529   return Members * NumRegs <= 8;
3530 }
3531 
// Classify a single argument for the 64-bit ELF (SVR4) ABI.
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial copy/destruction semantics go indirect.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = isAlignedParamType(Ty)? 16 : 8;
    uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // Scalars: extend 32-bit (and smaller promotable) integers to 64 bits.
  return (isPromotableTypeForABI(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
3601 
// Classify the return value for the 64-bit ELF (SVR4) ABI.
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return ABIArgInfo::getIndirect(0);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      // More than one doubleword: return as a two-element struct of GPRs;
      // otherwise a single integer rounded up to a whole number of bytes.
      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
      } else
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return ABIArgInfo::getIndirect(0);
  }

  // Scalars: extend 32-bit (and smaller promotable) integers to 64 bits.
  return (isPromotableTypeForABI(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
3656 
3657 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
  // On PPC64 SVR4 the va_list is a bare pointer into the parameter save
  // area, so va_arg is: (optionally) align the pointer, bump it past the
  // argument, then return the old (possibly adjusted) pointer.
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Handle types that require 16-byte alignment in the parameter save area.
  // Round the current pointer up to the next multiple of 16.
  if (isAlignedParamType(Ty)) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  // Update the va_list pointer.  The pointer should be bumped by the
  // size of the object.  We can trust getTypeSize() except for a complex
  // type whose base type is smaller than a doubleword.  For these, the
  // size of the object is 16 bytes; see below for further explanation.
  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
  QualType BaseTy;
  unsigned CplxBaseSize = 0;  // 0 means "not a complex type"

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    BaseTy = CTy->getElementType();
    CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
    // Each part occupies its own doubleword slot, so the object spans 16
    // bytes even though getTypeSize() reports less.
    if (CplxBaseSize < 8)
      SizeInBytes = 16;
  }

  // Arguments occupy whole 8-byte slots in the parameter save area.
  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords.  However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly.  So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (CplxBaseSize && CplxBaseSize < 8) {
    llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *ImagAddr = RealAddr;
    if (CGF.CGM.getDataLayout().isBigEndian()) {
      // Right-adjusted within each doubleword on big-endian.
      RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
    } else {
      // Little-endian: parts sit at the start of each doubleword.
      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
    }
    llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
    RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
    ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
    llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
    llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
    llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
                                            "vacplx");
    llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
    llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
    Builder.CreateStore(Real, RealPtr, false);
    Builder.CreateStore(Imag, ImagPtr, false);
    return Ptr;
  }

  // If the argument is smaller than 8 bytes, it is right-adjusted in
  // its doubleword slot.  Adjust the pointer to pick it up from the
  // correct offset.
  if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
}
3738 
3739 static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address)3740 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3741                               llvm::Value *Address) {
3742   // This is calculated from the LLVM and GCC tables and verified
3743   // against gcc output.  AFAIK all ABIs use the same encoding.
3744 
3745   CodeGen::CGBuilderTy &Builder = CGF.Builder;
3746 
3747   llvm::IntegerType *i8 = CGF.Int8Ty;
3748   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3749   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3750   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3751 
3752   // 0-31: r0-31, the 8-byte general-purpose registers
3753   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
3754 
3755   // 32-63: fp0-31, the 8-byte floating-point registers
3756   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3757 
3758   // 64-76 are various 4-byte special-purpose registers:
3759   // 64: mq
3760   // 65: lr
3761   // 66: ctr
3762   // 67: ap
3763   // 68-75 cr0-7
3764   // 76: xer
3765   AssignToArrayRange(Builder, Address, Four8, 64, 76);
3766 
3767   // 77-108: v0-31, the 16-byte vector registers
3768   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3769 
3770   // 109: vrsave
3771   // 110: vscr
3772   // 111: spe_acc
3773   // 112: spefscr
3774   // 113: sfp
3775   AssignToArrayRange(Builder, Address, Four8, 109, 113);
3776 
3777   return false;
3778 }
3779 
3780 bool
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const3781 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3782   CodeGen::CodeGenFunction &CGF,
3783   llvm::Value *Address) const {
3784 
3785   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3786 }
3787 
3788 bool
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const3789 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3790                                                 llvm::Value *Address) const {
3791 
3792   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3793 }
3794 
3795 //===----------------------------------------------------------------------===//
3796 // AArch64 ABI Implementation
3797 //===----------------------------------------------------------------------===//
3798 
3799 namespace {
3800 
3801 class AArch64ABIInfo : public ABIInfo {
3802 public:
3803   enum ABIKind {
3804     AAPCS = 0,
3805     DarwinPCS
3806   };
3807 
3808 private:
3809   ABIKind Kind;
3810 
3811 public:
AArch64ABIInfo(CodeGenTypes & CGT,ABIKind Kind)3812   AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
3813 
3814 private:
getABIKind() const3815   ABIKind getABIKind() const { return Kind; }
isDarwinPCS() const3816   bool isDarwinPCS() const { return Kind == DarwinPCS; }
3817 
3818   ABIArgInfo classifyReturnType(QualType RetTy) const;
3819   ABIArgInfo classifyArgumentType(QualType RetTy) const;
3820   bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3821   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3822                                          uint64_t Members) const override;
3823 
3824   bool isIllegalVectorType(QualType Ty) const;
3825 
computeInfo(CGFunctionInfo & FI) const3826   void computeInfo(CGFunctionInfo &FI) const override {
3827     if (!getCXXABI().classifyReturnType(FI))
3828       FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3829 
3830     for (auto &it : FI.arguments())
3831       it.info = classifyArgumentType(it.type);
3832   }
3833 
3834   llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3835                                CodeGenFunction &CGF) const;
3836 
3837   llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3838                               CodeGenFunction &CGF) const;
3839 
EmitVAArg(llvm::Value * VAListAddr,QualType Ty,CodeGenFunction & CGF) const3840   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3841                                  CodeGenFunction &CGF) const override {
3842     return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
3843                          : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
3844   }
3845 };
3846 
3847 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
3848 public:
AArch64TargetCodeGenInfo(CodeGenTypes & CGT,AArch64ABIInfo::ABIKind Kind)3849   AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
3850       : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
3851 
getARCRetainAutoreleasedReturnValueMarker() const3852   StringRef getARCRetainAutoreleasedReturnValueMarker() const {
3853     return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
3854   }
3855 
getDwarfEHStackPointer(CodeGen::CodeGenModule & M) const3856   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; }
3857 
doesReturnSlotInterfereWithArgs() const3858   virtual bool doesReturnSlotInterfereWithArgs() const { return false; }
3859 };
3860 }
3861 
classifyArgumentType(QualType Ty) const3862 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
3863   Ty = useFirstFieldIfTransparentUnion(Ty);
3864 
3865   // Handle illegal vector types here.
3866   if (isIllegalVectorType(Ty)) {
3867     uint64_t Size = getContext().getTypeSize(Ty);
3868     if (Size <= 32) {
3869       llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
3870       return ABIArgInfo::getDirect(ResType);
3871     }
3872     if (Size == 64) {
3873       llvm::Type *ResType =
3874           llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
3875       return ABIArgInfo::getDirect(ResType);
3876     }
3877     if (Size == 128) {
3878       llvm::Type *ResType =
3879           llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
3880       return ABIArgInfo::getDirect(ResType);
3881     }
3882     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3883   }
3884 
3885   if (!isAggregateTypeForABI(Ty)) {
3886     // Treat an enum type as its underlying type.
3887     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3888       Ty = EnumTy->getDecl()->getIntegerType();
3889 
3890     return (Ty->isPromotableIntegerType() && isDarwinPCS()
3891                 ? ABIArgInfo::getExtend()
3892                 : ABIArgInfo::getDirect());
3893   }
3894 
3895   // Structures with either a non-trivial destructor or a non-trivial
3896   // copy constructor are always indirect.
3897   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
3898     return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
3899                                    CGCXXABI::RAA_DirectInMemory);
3900   }
3901 
3902   // Empty records are always ignored on Darwin, but actually passed in C++ mode
3903   // elsewhere for GNU compatibility.
3904   if (isEmptyRecord(getContext(), Ty, true)) {
3905     if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
3906       return ABIArgInfo::getIgnore();
3907 
3908     return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3909   }
3910 
3911   // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
3912   const Type *Base = nullptr;
3913   uint64_t Members = 0;
3914   if (isHomogeneousAggregate(Ty, Base, Members)) {
3915     return ABIArgInfo::getDirect(
3916         llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
3917   }
3918 
3919   // Aggregates <= 16 bytes are passed directly in registers or on the stack.
3920   uint64_t Size = getContext().getTypeSize(Ty);
3921   if (Size <= 128) {
3922     unsigned Alignment = getContext().getTypeAlign(Ty);
3923     Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
3924 
3925     // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
3926     // For aggregates with 16-byte alignment, we use i128.
3927     if (Alignment < 128 && Size == 128) {
3928       llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
3929       return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
3930     }
3931     return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
3932   }
3933 
3934   return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3935 }
3936 
classifyReturnType(QualType RetTy) const3937 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
3938   if (RetTy->isVoidType())
3939     return ABIArgInfo::getIgnore();
3940 
3941   // Large vector types should be returned via memory.
3942   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
3943     return ABIArgInfo::getIndirect(0);
3944 
3945   if (!isAggregateTypeForABI(RetTy)) {
3946     // Treat an enum type as its underlying type.
3947     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3948       RetTy = EnumTy->getDecl()->getIntegerType();
3949 
3950     return (RetTy->isPromotableIntegerType() && isDarwinPCS()
3951                 ? ABIArgInfo::getExtend()
3952                 : ABIArgInfo::getDirect());
3953   }
3954 
3955   if (isEmptyRecord(getContext(), RetTy, true))
3956     return ABIArgInfo::getIgnore();
3957 
3958   const Type *Base = nullptr;
3959   uint64_t Members = 0;
3960   if (isHomogeneousAggregate(RetTy, Base, Members))
3961     // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
3962     return ABIArgInfo::getDirect();
3963 
3964   // Aggregates <= 16 bytes are returned directly in registers or on the stack.
3965   uint64_t Size = getContext().getTypeSize(RetTy);
3966   if (Size <= 128) {
3967     Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
3968     return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
3969   }
3970 
3971   return ABIArgInfo::getIndirect(0);
3972 }
3973 
3974 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
isIllegalVectorType(QualType Ty) const3975 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
3976   if (const VectorType *VT = Ty->getAs<VectorType>()) {
3977     // Check whether VT is legal.
3978     unsigned NumElements = VT->getNumElements();
3979     uint64_t Size = getContext().getTypeSize(VT);
3980     // NumElements should be power of 2 between 1 and 16.
3981     if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
3982       return true;
3983     return Size != 64 && (Size != 128 || NumElements == 1);
3984   }
3985   return false;
3986 }
3987 
isHomogeneousAggregateBaseType(QualType Ty) const3988 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
3989   // Homogeneous aggregates for AAPCS64 must have base types of a floating
3990   // point type or a short-vector type. This is the same as the 32-bit ABI,
3991   // but with the difference that any floating-point type is allowed,
3992   // including __fp16.
3993   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3994     if (BT->isFloatingPoint())
3995       return true;
3996   } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
3997     unsigned VecSize = getContext().getTypeSize(VT);
3998     if (VecSize == 64 || VecSize == 128)
3999       return true;
4000   }
4001   return false;
4002 }
4003 
isHomogeneousAggregateSmallEnough(const Type * Base,uint64_t Members) const4004 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4005                                                        uint64_t Members) const {
4006   return Members <= 4;
4007 }
4008 
/// Emit va_arg lowering for the AAPCS64 five-field va_list (layout quoted
/// below).  The emitted IR branches at runtime: a non-negative save-area
/// offset means arguments of this class have already spilled to the stack;
/// otherwise the argument may still be in the register save area.
llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
                                            QualType Ty,
                                            CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  // Work out the type the argument was actually passed as (pointer when
  // indirect, otherwise the coerced type if classification chose one).
  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  // HFAs were coerced to [N x Elt]; each element occupies one register.
  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  auto &Ctx = CGF.getContext();

  // Select the GPR or FPR bookkeeping fields of the va_list.
  llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::RoundUpToAlignment(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;  // each FP/SIMD register slot is 16 bytes
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    // Round reg_offs up to the next multiple of Align.
    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.  The saved offsets are negative while registers remain.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
  reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  // reg_offs is negative, so this indexes backwards from the top of the area.
  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
  llvm::Value *RegAddr = nullptr;
  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
    int Offset = 0;

    // Big-endian sub-128-bit elements are right-adjusted within each slot.
    if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
      Offset = 16 - Ctx.getTypeSize(Base) / 8;
    for (unsigned i = 0; i < NumMembers; ++i) {
      llvm::Value *BaseOffset =
          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateBitCast(
          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.  Big-endian small
    // scalars/HFAs are right-adjusted within their 8- or 16-byte slot.
    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
      BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);

      BaseAddr = CGF.Builder.CreateAdd(
          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

      BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
    }

    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackAddr = CGF.Builder.CreateAnd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  uint64_t StackSize;
  if (IsIndirect)
    StackSize = 8;  // just a pointer is stored on the stack
  else
    StackSize = Ctx.getTypeSize(Ty) / 8;

  // All stack slots are 8 bytes
  StackSize = llvm::RoundUpToAlignment(StackSize, 8);

  llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg
  CGF.Builder.CreateStore(NewStack, stack_p);

  // Big-endian small scalars are right-adjusted within their 8-byte slot.
  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      Ctx.getTypeSize(Ty) < 64) {
    int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  // Merge the register and stack addresses.
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(OnStackAddr, OnStackBlock);

  // Indirect arguments store a pointer; load through it once more.
  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");

  return ResAddr;
}
4250 
/// Emit va_arg lowering for the Darwin PCS, where the va_list is a plain
/// pointer into the argument area.  Returns nullptr for types the LLVM
/// va_arg instruction can handle itself.
llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr,
                                             QualType Ty,
                                             CodeGenFunction &CGF) const {
  // We do not support va_arg for aggregates or illegal vector types.
  // Lower VAArg here for these cases and use the LLVM va_arg instruction for
  // other cases.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return nullptr;

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;

  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool isHA = isHomogeneousAggregate(Ty, Base, Members);

  bool isIndirect = false;
  // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
  // be passed indirectly; only a pointer (8 bytes, 8-aligned) is in the
  // argument area then.
  if (Size > 16 && !isHA) {
    isIndirect = true;
    Size = 8;
    Align = 8;
  }

  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.  Do not advance the
    // pointer; just reinterpret the current one.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  // Over-aligned arguments: round the pointer up to the type's alignment.
  const uint64_t MinABIAlign = 8;
  if (Align > MinABIAlign) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
    llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
    Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
  }

  // Advance the va_list pointer past this argument (whole 8-byte slots).
  uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
  llvm::Value *NextAddr = Builder.CreateGEP(
      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  // For indirect arguments the slot holds a pointer to the real object.
  if (isIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
4310 
4311 //===----------------------------------------------------------------------===//
4312 // ARM ABI Implementation
4313 //===----------------------------------------------------------------------===//
4314 
4315 namespace {
4316 
4317 class ARMABIInfo : public ABIInfo {
4318 public:
4319   enum ABIKind {
4320     APCS = 0,
4321     AAPCS = 1,
4322     AAPCS_VFP
4323   };
4324 
4325 private:
4326   ABIKind Kind;
4327   mutable int VFPRegs[16];
4328   const unsigned NumVFPs;
4329   const unsigned NumGPRs;
4330   mutable unsigned AllocatedGPRs;
4331   mutable unsigned AllocatedVFPs;
4332 
4333 public:
ARMABIInfo(CodeGenTypes & CGT,ABIKind _Kind)4334   ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
4335     NumVFPs(16), NumGPRs(4) {
4336     setCCs();
4337     resetAllocatedRegs();
4338   }
4339 
isEABI() const4340   bool isEABI() const {
4341     switch (getTarget().getTriple().getEnvironment()) {
4342     case llvm::Triple::Android:
4343     case llvm::Triple::EABI:
4344     case llvm::Triple::EABIHF:
4345     case llvm::Triple::GNUEABI:
4346     case llvm::Triple::GNUEABIHF:
4347       return true;
4348     default:
4349       return false;
4350     }
4351   }
4352 
isEABIHF() const4353   bool isEABIHF() const {
4354     switch (getTarget().getTriple().getEnvironment()) {
4355     case llvm::Triple::EABIHF:
4356     case llvm::Triple::GNUEABIHF:
4357       return true;
4358     default:
4359       return false;
4360     }
4361   }
4362 
getABIKind() const4363   ABIKind getABIKind() const { return Kind; }
4364 
4365 private:
4366   ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
4367   ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
4368                                   bool &IsCPRC) const;
4369   bool isIllegalVectorType(QualType Ty) const;
4370 
4371   bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4372   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4373                                          uint64_t Members) const override;
4374 
4375   void computeInfo(CGFunctionInfo &FI) const override;
4376 
4377   llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4378                          CodeGenFunction &CGF) const override;
4379 
4380   llvm::CallingConv::ID getLLVMDefaultCC() const;
4381   llvm::CallingConv::ID getABIDefaultCC() const;
4382   void setCCs();
4383 
4384   void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
4385   void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
4386   void resetAllocatedRegs(void) const;
4387 };
4388 
4389 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
4390 public:
ARMTargetCodeGenInfo(CodeGenTypes & CGT,ARMABIInfo::ABIKind K)4391   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
4392     :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
4393 
getABIInfo() const4394   const ARMABIInfo &getABIInfo() const {
4395     return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
4396   }
4397 
getDwarfEHStackPointer(CodeGen::CodeGenModule & M) const4398   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4399     return 13;
4400   }
4401 
getARCRetainAutoreleasedReturnValueMarker() const4402   StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4403     return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
4404   }
4405 
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const4406   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4407                                llvm::Value *Address) const override {
4408     llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
4409 
4410     // 0-15 are the 16 integer registers.
4411     AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
4412     return false;
4413   }
4414 
getSizeOfUnwindException() const4415   unsigned getSizeOfUnwindException() const override {
4416     if (getABIInfo().isEABI()) return 88;
4417     return TargetCodeGenInfo::getSizeOfUnwindException();
4418   }
4419 
SetTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM) const4420   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4421                            CodeGen::CodeGenModule &CGM) const override {
4422     const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4423     if (!FD)
4424       return;
4425 
4426     const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
4427     if (!Attr)
4428       return;
4429 
4430     const char *Kind;
4431     switch (Attr->getInterrupt()) {
4432     case ARMInterruptAttr::Generic: Kind = ""; break;
4433     case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
4434     case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
4435     case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
4436     case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
4437     case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
4438     }
4439 
4440     llvm::Function *Fn = cast<llvm::Function>(GV);
4441 
4442     Fn->addFnAttr("interrupt", Kind);
4443 
4444     if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
4445       return;
4446 
4447     // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
4448     // however this is not necessarily true on taking any interrupt. Instruct
4449     // the backend to perform a realignment as part of the function prologue.
4450     llvm::AttrBuilder B;
4451     B.addStackAlignmentAttr(8);
4452     Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
4453                       llvm::AttributeSet::get(CGM.getLLVMContext(),
4454                                               llvm::AttributeSet::FunctionIndex,
4455                                               B));
4456   }
4457 };
4458 
4459 }
4460 
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // Classify the return type and every argument, tracking how many core
  // (GPR) and VFP registers each classification consumes so later arguments
  // can be padded once registers run out.
  //
  // To correctly handle Homogeneous Aggregate, we need to keep track of the
  // VFP registers allocated so far.
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  resetAllocatedRegs();

  if (getCXXABI().classifyReturnType(FI)) {
    // The C++ ABI decided the return convention; an indirect (sret) result
    // occupies one GPR for the result pointer.
    if (FI.getReturnInfo().isIndirect())
      markAllocatedGPRs(1, 1);
  } else {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
  }
  for (auto &I : FI.arguments()) {
    // Snapshot the register counters so we can tell what this argument used.
    unsigned PreAllocationVFPs = AllocatedVFPs;
    unsigned PreAllocationGPRs = AllocatedGPRs;
    bool IsCPRC = false;
    // 6.1.2.3 There is one VFP co-processor register class using registers
    // s0-s15 (d0-d7) for passing arguments.
    I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC);

    // If we have allocated some arguments onto the stack (due to running
    // out of VFP registers), we cannot split an argument between GPRs and
    // the stack. If this situation occurs, we add padding to prevent the
    // GPRs from being used. In this situation, the current argument could
    // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
    // unusable anyway.
    // We do not have to do this if the argument is being passed ByVal, as the
    // backend can handle that situation correctly.
    const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs;
    const bool IsByVal = I.info.isIndirect() && I.info.getIndirectByVal();
    if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs &&
        StackUsed && !IsByVal) {
      // Pad out the remaining free GPRs with an i32 array so this argument
      // lands entirely on the stack.
      llvm::Type *PaddingTy = llvm::ArrayType::get(
          llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
      if (I.info.canHaveCoerceToType()) {
        I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */,
                                       0 /* offset */, PaddingTy, true);
      } else {
        I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
                                       PaddingTy, true);
      }
    }
  }

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}
4517 
4518 /// Return the default calling convention that LLVM will use.
getLLVMDefaultCC() const4519 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
4520   // The default calling convention that LLVM will infer.
4521   if (isEABIHF())
4522     return llvm::CallingConv::ARM_AAPCS_VFP;
4523   else if (isEABI())
4524     return llvm::CallingConv::ARM_AAPCS;
4525   else
4526     return llvm::CallingConv::ARM_APCS;
4527 }
4528 
4529 /// Return the calling convention that our ABI would like us to use
4530 /// as the C calling convention.
getABIDefaultCC() const4531 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
4532   switch (getABIKind()) {
4533   case APCS: return llvm::CallingConv::ARM_APCS;
4534   case AAPCS: return llvm::CallingConv::ARM_AAPCS;
4535   case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
4536   }
4537   llvm_unreachable("bad ABI kind");
4538 }
4539 
setCCs()4540 void ARMABIInfo::setCCs() {
4541   assert(getRuntimeCC() == llvm::CallingConv::C);
4542 
4543   // Don't muddy up the IR with a ton of explicit annotations if
4544   // they'd just match what LLVM will infer from the triple.
4545   llvm::CallingConv::ID abiCC = getABIDefaultCC();
4546   if (abiCC != getLLVMDefaultCC())
4547     RuntimeCC = abiCC;
4548 
4549   BuiltinCC = (getABIKind() == APCS ?
4550                llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
4551 }
4552 
4553 /// markAllocatedVFPs - update VFPRegs according to the alignment and
4554 /// number of VFP registers (unit is S register) requested.
markAllocatedVFPs(unsigned Alignment,unsigned NumRequired) const4555 void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
4556                                    unsigned NumRequired) const {
4557   // Early Exit.
4558   if (AllocatedVFPs >= 16) {
4559     // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on
4560     // the stack.
4561     AllocatedVFPs = 17;
4562     return;
4563   }
4564   // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
4565   // VFP registers of the appropriate type unallocated then the argument is
4566   // allocated to the lowest-numbered sequence of such registers.
4567   for (unsigned I = 0; I < 16; I += Alignment) {
4568     bool FoundSlot = true;
4569     for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4570       if (J >= 16 || VFPRegs[J]) {
4571          FoundSlot = false;
4572          break;
4573       }
4574     if (FoundSlot) {
4575       for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4576         VFPRegs[J] = 1;
4577       AllocatedVFPs += NumRequired;
4578       return;
4579     }
4580   }
4581   // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
4582   // unallocated are marked as unavailable.
4583   for (unsigned I = 0; I < 16; I++)
4584     VFPRegs[I] = 1;
4585   AllocatedVFPs = 17; // We do not have enough VFP registers.
4586 }
4587 
4588 /// Update AllocatedGPRs to record the number of general purpose registers
4589 /// which have been allocated. It is valid for AllocatedGPRs to go above 4,
4590 /// this represents arguments being stored on the stack.
markAllocatedGPRs(unsigned Alignment,unsigned NumRequired) const4591 void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
4592                                    unsigned NumRequired) const {
4593   assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes");
4594 
4595   if (Alignment == 2 && AllocatedGPRs & 0x1)
4596     AllocatedGPRs += 1;
4597 
4598   AllocatedGPRs += NumRequired;
4599 }
4600 
resetAllocatedRegs(void) const4601 void ARMABIInfo::resetAllocatedRegs(void) const {
4602   AllocatedGPRs = 0;
4603   AllocatedVFPs = 0;
4604   for (unsigned i = 0; i < NumVFPs; ++i)
4605     VFPRegs[i] = 0;
4606 }
4607 
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            bool &IsCPRC) const {
  // Classify a single argument under the active ARM convention and record
  // the core/VFP registers it consumes via markAllocatedGPRs/markAllocatedVFPs.
  // IsCPRC is set when the argument is a VFP co-processor register candidate.
  //
  // We update number of allocated VFPs according to
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      // Tiny vectors travel as a plain i32 in a single GPR.
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      markAllocatedGPRs(1, 1);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      // Coerce to <2 x i32>: GPR pair for AAPCS/variadic, else VFP registers.
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){
        markAllocatedGPRs(2, 2);
      } else {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      // Coerce to <4 x i32>, analogous to the 64-bit case above.
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
        markAllocatedGPRs(2, 4);
      } else {
        markAllocatedVFPs(4, 4);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    // Any other illegal vector is passed indirectly; the pointer uses a GPR.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  // Update VFPRegs for legal vector types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      uint64_t Size = getContext().getTypeSize(VT);
      // Size of a legal vector should be power of 2 and above 64.
      markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
      IsCPRC = true;
    }
  }
  // Update VFPRegs for floating point types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      // half/float occupy one S register; double/long double a D register
      // (two aligned S registers).
      if (BT->getKind() == BuiltinType::Half ||
          BT->getKind() == BuiltinType::Float) {
        markAllocatedVFPs(1, 1);
        IsCPRC = true;
      }
      if (BT->getKind() == BuiltinType::Double ||
          BT->getKind() == BuiltinType::LongDouble) {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
    }
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    // Scalars take one GPR per 32 bits (an aligned pair for 64-bit values)
    // unless they were already counted against the VFP registers above.
    unsigned Size = getContext().getTypeSize(Ty);
    if (!IsCPRC)
      markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                          : ABIArgInfo::getDirect());
  }

  // Let the C++ ABI dictate how records with a special pass-by-value ABI are
  // passed; the resulting pointer consumes a GPR.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsEffectivelyAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Base can be a floating-point or a vector.
      if (Base->isVectorType()) {
        // ElementSize is in number of floats.
        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
        markAllocatedVFPs(ElementSize,
                          Members * ElementSize);
      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
        markAllocatedVFPs(1, Members);
      else {
        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
        markAllocatedVFPs(2, Members * 2);
      }
      IsCPRC = true;
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    // Update Allocated GPRs. Since this is only used when the size of the
    // argument is greater than 64 bytes, this will always use up any available
    // registers (of which there are 4). We also don't care about getting the
    // alignment right, because general-purpose registers cannot be back-filled.
    markAllocatedGPRs(1, 4);
    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
           /*Realign=*/TyAlign > ABIAlign);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    markAllocatedGPRs(1, SizeRegs);
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
    markAllocatedGPRs(2, SizeRegs * 2);
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
4764 
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // Decide whether Ty is "integer-like" in the APCS sense; used below by
  // ARMABIInfo::classifyReturnType to return such structs directly in r0.
  //
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    // Recurse so nested records are held to the same standard.
    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
4849 
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  // Decide how a value of RetTy is returned. isVariadic matters because
  // AAPCS_VFP only applies to non-variadic functions.
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    // An indirect result consumes a GPR for the result pointer.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                            : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getDataLayout().isBigEndian())
      // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  markAllocatedGPRs(1, 1);
  return ABIArgInfo::getIndirect(0);
}
4936 
4937 /// isIllegalVector - check whether Ty is an illegal vector type.
isIllegalVectorType(QualType Ty) const4938 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
4939   if (const VectorType *VT = Ty->getAs<VectorType>()) {
4940     // Check whether VT is legal.
4941     unsigned NumElements = VT->getNumElements();
4942     uint64_t Size = getContext().getTypeSize(VT);
4943     // NumElements should be power of 2.
4944     if ((NumElements & (NumElements - 1)) != 0)
4945       return true;
4946     // Size should be greater than 32 bits.
4947     return Size <= 32;
4948   }
4949   return false;
4950 }
4951 
isHomogeneousAggregateBaseType(QualType Ty) const4952 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4953   // Homogeneous aggregates for AAPCS-VFP must have base types of float,
4954   // double, or 64-bit or 128-bit vectors.
4955   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4956     if (BT->getKind() == BuiltinType::Float ||
4957         BT->getKind() == BuiltinType::Double ||
4958         BT->getKind() == BuiltinType::LongDouble)
4959       return true;
4960   } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4961     unsigned VecSize = getContext().getTypeSize(VT);
4962     if (VecSize == 64 || VecSize == 128)
4963       return true;
4964   }
4965   return false;
4966 }
4967 
bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  // AAPCS limits a homogeneous aggregate to at most four members; the base
  // type itself does not affect the limit here.
  return Members <= 4;
}
4972 
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // Emit code that reads one va_arg of type Ty from the va_list at
  // VAListAddr, advances the list past it, and returns a Ty* pointing at the
  // (possibly re-aligned or copied) argument.
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  bool IsIndirect = false;

  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  else
    TyAlign = 4;
  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  if (isIllegalVectorType(Ty) && Size > 16) {
    IsIndirect = true;
    // Only a pointer lives in the va_list slot, so step by pointer size.
    Size = 4;
    TyAlign = 4;
  }

  // Handle address alignment for ABI alignment > 4 bytes.
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not power of 2!");
    // Round ap.cur up to a TyAlign boundary: (ap + align - 1) & ~(align - 1).
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  // Advance ap.cur past this argument; slots are 4-byte granular.
  uint64_t Offset =
    llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (IsIndirect)
    // The slot holds a pointer to the actual argument; chase it.
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
    // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
    // may not be correctly aligned for the vector type. We create an aligned
    // temporary space and copy the content over from ap.cur to the temporary
    // space. This is necessary if the natural alignment of the type is greater
    // than the ABI alignment.
    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
                                                    "var.align");
    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
    Builder.CreateMemCpy(Dst, Src,
        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
        TyAlign, false);
    Addr = AlignedTemp; //The content is in aligned location.
  }
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
5048 
5049 namespace {
5050 
/// NaClARMABIInfo - ABI info for Native Client on ARM; delegates each
/// function either to the PNaCl lowering or to the plain ARM lowering
/// depending on its calling convention (see computeInfo below).
class NaClARMABIInfo : public ABIInfo {
 public:
  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
 private:
  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
  ARMABIInfo NInfo; // Used for everything else.
};
5062 
/// TargetCodeGenInfo wrapper that installs NaClARMABIInfo as the ABI
/// implementation for Native Client ARM targets.
class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo  {
 public:
  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
};
5068 
5069 }
5070 
computeInfo(CGFunctionInfo & FI) const5071 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
5072   if (FI.getASTCallingConvention() == CC_PnaclCall)
5073     PInfo.computeInfo(FI);
5074   else
5075     static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
5076 }
5077 
EmitVAArg(llvm::Value * VAListAddr,QualType Ty,CodeGenFunction & CGF) const5078 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5079                                        CodeGenFunction &CGF) const {
5080   // Always use the native convention; calling pnacl-style varargs functions
5081   // is unsupported.
5082   return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
5083 }
5084 
5085 //===----------------------------------------------------------------------===//
5086 // NVPTX ABI Implementation
5087 //===----------------------------------------------------------------------===//
5088 
5089 namespace {
5090 
/// NVPTXABIInfo - argument/return classification for the NVPTX target.
class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  // NVPTX has no varargs support; the definition below is unreachable.
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CFG) const override;
};
5102 
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  // Annotates NVPTX functions (e.g. OpenCL/CUDA kernels); see the
  // out-of-line definition below.
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
5115 
classifyReturnType(QualType RetTy) const5116 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
5117   if (RetTy->isVoidType())
5118     return ABIArgInfo::getIgnore();
5119 
5120   // note: this is different from default ABI
5121   if (!RetTy->isScalarType())
5122     return ABIArgInfo::getDirect();
5123 
5124   // Treat an enum type as its underlying type.
5125   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5126     RetTy = EnumTy->getDecl()->getIntegerType();
5127 
5128   return (RetTy->isPromotableIntegerType() ?
5129           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5130 }
5131 
classifyArgumentType(QualType Ty) const5132 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
5133   // Treat an enum type as its underlying type.
5134   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5135     Ty = EnumTy->getDecl()->getIntegerType();
5136 
5137   // Return aggregates type as indirect by value
5138   if (isAggregateTypeForABI(Ty))
5139     return ABIArgInfo::getIndirect(0, /* byval */ true);
5140 
5141   return (Ty->isPromotableIntegerType() ?
5142           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5143 }
5144 
computeInfo(CGFunctionInfo & FI) const5145 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
5146   if (!getCXXABI().classifyReturnType(FI))
5147     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5148   for (auto &I : FI.arguments())
5149     I.info = classifyArgumentType(I.type);
5150 
5151   // Always honor user-specified calling convention.
5152   if (FI.getCallingConvention() != llvm::CallingConv::C)
5153     return;
5154 
5155   FI.setEffectiveCallingConvention(getRuntimeCC());
5156 }
5157 
EmitVAArg(llvm::Value * VAListAddr,QualType Ty,CodeGenFunction & CFG) const5158 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5159                                      CodeGenFunction &CFG) const {
5160   llvm_unreachable("NVPTX does not support varargs");
5161 }
5162 
5163 void NVPTXTargetCodeGenInfo::
SetTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & M) const5164 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5165                     CodeGen::CodeGenModule &M) const{
5166   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5167   if (!FD) return;
5168 
5169   llvm::Function *F = cast<llvm::Function>(GV);
5170 
5171   // Perform special handling in OpenCL mode
5172   if (M.getLangOpts().OpenCL) {
5173     // Use OpenCL function attributes to check for kernel functions
5174     // By default, all functions are device functions
5175     if (FD->hasAttr<OpenCLKernelAttr>()) {
5176       // OpenCL __kernel functions get kernel metadata
5177       // Create !{<func-ref>, metadata !"kernel", i32 1} node
5178       addNVVMMetadata(F, "kernel", 1);
5179       // And kernel functions are not subject to inlining
5180       F->addFnAttr(llvm::Attribute::NoInline);
5181     }
5182   }
5183 
5184   // Perform special handling in CUDA mode.
5185   if (M.getLangOpts().CUDA) {
5186     // CUDA __global__ functions get a kernel metadata entry.  Since
5187     // __global__ functions cannot be called from the device, we do not
5188     // need to set the noinline attribute.
5189     if (FD->hasAttr<CUDAGlobalAttr>()) {
5190       // Create !{<func-ref>, metadata !"kernel", i32 1} node
5191       addNVVMMetadata(F, "kernel", 1);
5192     }
5193     if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
5194       // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
5195       addNVVMMetadata(F, "maxntidx",
5196                       FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
5197       // min blocks is a default argument for CUDALaunchBoundsAttr, so getting a
5198       // zero value from getMinBlocks either means it was not specified in
5199       // __launch_bounds__ or the user specified a 0 value. In both cases, we
5200       // don't have to add a PTX directive.
5201       int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
5202       if (MinCTASM > 0) {
5203         // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
5204         addNVVMMetadata(F, "minctasm", MinCTASM);
5205       }
5206     }
5207   }
5208 }
5209 
addNVVMMetadata(llvm::Function * F,StringRef Name,int Operand)5210 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
5211                                              int Operand) {
5212   llvm::Module *M = F->getParent();
5213   llvm::LLVMContext &Ctx = M->getContext();
5214 
5215   // Get "nvvm.annotations" metadata node
5216   llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
5217 
5218   llvm::Metadata *MDVals[] = {
5219       llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
5220       llvm::ConstantAsMetadata::get(
5221           llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
5222   // Append metadata to nvvm.annotations
5223   MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
5224 }
5225 }
5226 
5227 //===----------------------------------------------------------------------===//
5228 // SystemZ ABI Implementation
5229 //===----------------------------------------------------------------------===//
5230 
5231 namespace {
5232 
// ABI details for SystemZ (s390x).
class SystemZABIInfo : public ABIInfo {
public:
  SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  // True if Ty must be extended to full 64-bit register width.
  bool isPromotableIntegerType(QualType Ty) const;
  // True if Ty is complex or an aggregate for ABI purposes.
  bool isCompoundType(QualType Ty) const;
  // True if Ty is passed/returned in a floating-point register.
  bool isFPArgumentType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  // Classify the result (unless the C++ ABI already did) and each argument.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
5254 
// Target hooks for SystemZ; the defaults suffice beyond the ABI info.
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
};
5260 
5261 }
5262 
isPromotableIntegerType(QualType Ty) const5263 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
5264   // Treat an enum type as its underlying type.
5265   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5266     Ty = EnumTy->getDecl()->getIntegerType();
5267 
5268   // Promotable integer types are required to be promoted by the ABI.
5269   if (Ty->isPromotableIntegerType())
5270     return true;
5271 
5272   // 32-bit values must also be promoted.
5273   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5274     switch (BT->getKind()) {
5275     case BuiltinType::Int:
5276     case BuiltinType::UInt:
5277       return true;
5278     default:
5279       return false;
5280     }
5281   return false;
5282 }
5283 
isCompoundType(QualType Ty) const5284 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
5285   return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
5286 }
5287 
isFPArgumentType(QualType Ty) const5288 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
5289   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5290     switch (BT->getKind()) {
5291     case BuiltinType::Float:
5292     case BuiltinType::Double:
5293       return true;
5294     default:
5295       return false;
5296     }
5297 
5298   if (const RecordType *RT = Ty->getAsStructureType()) {
5299     const RecordDecl *RD = RT->getDecl();
5300     bool Found = false;
5301 
5302     // If this is a C++ record, check the bases first.
5303     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
5304       for (const auto &I : CXXRD->bases()) {
5305         QualType Base = I.getType();
5306 
5307         // Empty bases don't affect things either way.
5308         if (isEmptyRecord(getContext(), Base, true))
5309           continue;
5310 
5311         if (Found)
5312           return false;
5313         Found = isFPArgumentType(Base);
5314         if (!Found)
5315           return false;
5316       }
5317 
5318     // Check the fields.
5319     for (const auto *FD : RD->fields()) {
5320       // Empty bitfields don't affect things either way.
5321       // Unlike isSingleElementStruct(), empty structure and array fields
5322       // do count.  So do anonymous bitfields that aren't zero-sized.
5323       if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5324         return true;
5325 
5326       // Unlike isSingleElementStruct(), arrays do not count.
5327       // Nested isFPArgumentType structures still do though.
5328       if (Found)
5329         return false;
5330       Found = isFPArgumentType(FD->getType());
5331       if (!Found)
5332         return false;
5333     }
5334 
5335     // Unlike isSingleElementStruct(), trailing padding is allowed.
5336     // An 8-byte aligned struct s { float f; } is passed as a double.
5337     return Found;
5338   }
5339 
5340   return false;
5341 }
5342 
// Emit a va_arg access for SystemZ.  Arguments are fetched either from the
// register save area (first 5 GPRs / 4 FPRs) or from the overflow area on
// the stack, chosen at runtime by comparing the saved register count.
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every argument occupies 8 bytes and is passed by preference in either
  // GPRs or FPRs.
  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool InFPRs = isFPArgumentType(Ty);

  llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
  bool IsIndirect = AI.isIndirect();
  unsigned UnpaddedBitSize;
  if (IsIndirect) {
    // Indirect arguments occupy one pointer-sized (64-bit) slot; the value
    // itself lives behind that pointer.
    APTy = llvm::PointerType::getUnqual(APTy);
    UnpaddedBitSize = 64;
  } else
    UnpaddedBitSize = getContext().getTypeSize(Ty);
  unsigned PaddedBitSize = 64;
  assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");

  unsigned PaddedSize = PaddedBitSize / 8;
  // Bytes of padding before the value inside its 8-byte slot.
  unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;

  unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
  if (InFPRs) {
    MaxRegs = 4; // Maximum of 4 FPR arguments
    RegCountField = 1; // __fpr
    RegSaveIndex = 16; // save offset for f0
    RegPadding = 0; // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5; // Maximum of 5 GPR arguments
    RegCountField = 0; // __gpr
    RegSaveIndex = 2; // save offset for r2
    RegPadding = Padding; // values are passed in the low bits of a GPR
  }

  // Branch on whether this argument still fits in the register save area.
  llvm::Value *RegCountPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Type *IndexTy = RegCount->getType();
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                 "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Work out the address of an argument register:
  // reg_save_area + RegSaveIndex*8 + reg_count*8 + in-slot padding.
  llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
  llvm::Value *ScaledRegCount =
    CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
    llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
  llvm::Value *RegOffset =
    CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  llvm::Value *RegSaveAreaPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
    CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  llvm::Value *RawRegAddr =
    CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
  llvm::Value *RegAddr =
    CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");

  // Update the register count
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
    CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);

  // Work out the address of a stack argument.
  llvm::Value *OverflowArgAreaPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
  llvm::Value *OverflowArgArea =
    CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
  llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
  llvm::Value *RawMemAddr =
    CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
  llvm::Value *MemAddr =
    CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");

  // Update overflow_arg_area_ptr pointer
  llvm::Value *NewOverflowArgArea =
    CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  CGF.EmitBranch(ContBlock);

  // Return the appropriate result: a phi over the two address computations.
  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  // For an indirect argument, dereference once more to get the value address.
  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");

  return ResAddr;
}
5457 
classifyReturnType(QualType RetTy) const5458 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
5459   if (RetTy->isVoidType())
5460     return ABIArgInfo::getIgnore();
5461   if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5462     return ABIArgInfo::getIndirect(0);
5463   return (isPromotableIntegerType(RetTy) ?
5464           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5465 }
5466 
classifyArgumentType(QualType Ty) const5467 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
5468   // Handle the generic C++ ABI.
5469   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5470     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5471 
5472   // Integers and enums are extended to full register width.
5473   if (isPromotableIntegerType(Ty))
5474     return ABIArgInfo::getExtend();
5475 
5476   // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
5477   uint64_t Size = getContext().getTypeSize(Ty);
5478   if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
5479     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5480 
5481   // Handle small structures.
5482   if (const RecordType *RT = Ty->getAs<RecordType>()) {
5483     // Structures with flexible arrays have variable length, so really
5484     // fail the size test above.
5485     const RecordDecl *RD = RT->getDecl();
5486     if (RD->hasFlexibleArrayMember())
5487       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5488 
5489     // The structure is passed as an unextended integer, a float, or a double.
5490     llvm::Type *PassTy;
5491     if (isFPArgumentType(Ty)) {
5492       assert(Size == 32 || Size == 64);
5493       if (Size == 32)
5494         PassTy = llvm::Type::getFloatTy(getVMContext());
5495       else
5496         PassTy = llvm::Type::getDoubleTy(getVMContext());
5497     } else
5498       PassTy = llvm::IntegerType::get(getVMContext(), Size);
5499     return ABIArgInfo::getDirect(PassTy);
5500   }
5501 
5502   // Non-structure compounds are passed indirectly.
5503   if (isCompoundType(Ty))
5504     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5505 
5506   return ABIArgInfo::getDirect(nullptr);
5507 }
5508 
5509 //===----------------------------------------------------------------------===//
5510 // MSP430 ABI Implementation
5511 //===----------------------------------------------------------------------===//
5512 
5513 namespace {
5514 
// Target hooks for MSP430; argument classification uses the defaults.
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  // Applies the MSP430 'interrupt' attribute to ISR functions.
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
5522 
5523 }
5524 
SetTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & M) const5525 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5526                                                   llvm::GlobalValue *GV,
5527                                              CodeGen::CodeGenModule &M) const {
5528   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
5529     if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
5530       // Handle 'interrupt' attribute:
5531       llvm::Function *F = cast<llvm::Function>(GV);
5532 
5533       // Step 1: Set ISR calling convention.
5534       F->setCallingConv(llvm::CallingConv::MSP430_INTR);
5535 
5536       // Step 2: Add attributes goodness.
5537       F->addFnAttr(llvm::Attribute::NoInline);
5538 
5539       // Step 3: Emit ISR vector alias.
5540       unsigned Num = attr->getNumber() / 2;
5541       llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
5542                                 "__isr_" + Twine(Num), F);
5543     }
5544   }
5545 }
5546 
5547 //===----------------------------------------------------------------------===//
5548 // MIPS ABI Implementation.  This works for both little-endian and
5549 // big-endian variants.
5550 //===----------------------------------------------------------------------===//
5551 
5552 namespace {
// ABI details for MIPS, covering both the O32 and N32/N64 conventions.
class MipsABIInfo : public ABIInfo {
  // True for O32; false selects N32/N64 handling.
  bool IsO32;
  // Per-argument slot alignment and maximum stack alignment, in bytes
  // (O32: 4/8, N32/N64: 8/16 — see the constructor).
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  // Appends integer pieces covering TySize bits to ArgList.
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  // Builds the coerced LLVM type used to pass an aggregate directly.
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  // Builds the LLVM type used to return a small aggregate in registers.
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  // Integer padding type covering the gap between two byte offsets, or
  // null when the gap is under one slot.  NOTE(review): the first parameter
  // is named Align here but OrigOffset at the definition — confirm intent.
  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  // Offset is the running byte offset into the argument area; it is
  // advanced past this argument.
  ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
5572 
// Target hooks for MIPS (both O32 and N32/N64).
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  // Size of _Unwind_Exception on this ABI: 24 bytes on O32, 32 on N32/N64.
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
    : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
      SizeOfUnwindException(IsO32 ? 24 : 32) {}

  // DWARF register number of the stack pointer ($29 == $sp).
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }

  // Translate the mips16/nomips16 source attributes into the matching
  // LLVM string function attributes.
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
5604 }
5605 
CoerceToIntArgs(uint64_t TySize,SmallVectorImpl<llvm::Type * > & ArgList) const5606 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
5607                                   SmallVectorImpl<llvm::Type *> &ArgList) const {
5608   llvm::IntegerType *IntTy =
5609     llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
5610 
5611   // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
5612   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5613     ArgList.push_back(IntTy);
5614 
5615   // If necessary, add one more integer type to ArgList.
5616   unsigned R = TySize % (MinABIStackAlignInBytes * 8);
5617 
5618   if (R)
5619     ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
5620 }
5621 
// In N32/64, an aligned double precision floating point field is passed in
// a register; this builds the coerced struct type reflecting that layout.
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  // O32 passes every aggregate as a sequence of integer words.
  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  // Complex types keep their natural LLVM representation.
  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  // LastOffset tracks the end (in bits) of the last emitted member.
  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    // Only doubles are candidates for FPR passing here.
    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64 to bridge the gap
    // between the previous member and this double.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  // Cover whatever follows the last aligned double with integer pieces.
  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}
5679 
getPaddingType(uint64_t OrigOffset,uint64_t Offset) const5680 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
5681                                         uint64_t Offset) const {
5682   if (OrigOffset + MinABIStackAlignInBytes > Offset)
5683     return nullptr;
5684 
5685   return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
5686 }
5687 
// Classify one argument and advance Offset (the running byte offset into
// the argument area) past it; the offset drives padding insertion so that
// aggregates land at their required alignment.
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  // Clamp the alignment into [MinABIStackAlignInBytes, StackAlignInBytes].
  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  // Advance Offset: align up, then add the argument's (slot-rounded) size.
  unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
  Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    // Records the C++ ABI forces into memory consume one pointer slot.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    ABIArgInfo ArgInfo =
        ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                              getPaddingType(OrigOffset, CurrOffset));
    ArgInfo.setInReg(true);
    return ArgInfo;
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // All integral types are promoted to the GPR width.
  if (Ty->isIntegralOrEnumerationType())
    return ABIArgInfo::getExtend();

  // Other scalars go direct; on N32/N64 they may still need leading padding.
  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}
5732 
5733 llvm::Type*
returnAggregateInRegs(QualType RetTy,uint64_t Size) const5734 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
5735   const RecordType *RT = RetTy->getAs<RecordType>();
5736   SmallVector<llvm::Type*, 8> RTList;
5737 
5738   if (RT && RT->isStructureOrClassType()) {
5739     const RecordDecl *RD = RT->getDecl();
5740     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5741     unsigned FieldCnt = Layout.getFieldCount();
5742 
5743     // N32/64 returns struct/classes in floating point registers if the
5744     // following conditions are met:
5745     // 1. The size of the struct/class is no larger than 128-bit.
5746     // 2. The struct/class has one or two fields all of which are floating
5747     //    point types.
5748     // 3. The offset of the first field is zero (this follows what gcc does).
5749     //
5750     // Any other composite results are returned in integer registers.
5751     //
5752     if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
5753       RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
5754       for (; b != e; ++b) {
5755         const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
5756 
5757         if (!BT || !BT->isFloatingPoint())
5758           break;
5759 
5760         RTList.push_back(CGT.ConvertType(b->getType()));
5761       }
5762 
5763       if (b == e)
5764         return llvm::StructType::get(getVMContext(), RTList,
5765                                      RD->hasAttr<PackedAttr>());
5766 
5767       RTList.clear();
5768     }
5769   }
5770 
5771   CoerceToIntArgs(Size, RTList);
5772   return llvm::StructType::get(getVMContext(), RTList);
5773 }
5774 
classifyReturnType(QualType RetTy) const5775 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
5776   uint64_t Size = getContext().getTypeSize(RetTy);
5777 
5778   if (RetTy->isVoidType())
5779     return ABIArgInfo::getIgnore();
5780 
5781   // O32 doesn't treat zero-sized structs differently from other structs.
5782   // However, N32/N64 ignores zero sized return values.
5783   if (!IsO32 && Size == 0)
5784     return ABIArgInfo::getIgnore();
5785 
5786   if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
5787     if (Size <= 128) {
5788       if (RetTy->isAnyComplexType())
5789         return ABIArgInfo::getDirect();
5790 
5791       // O32 returns integer vectors in registers and N32/N64 returns all small
5792       // aggregates in registers.
5793       if (!IsO32 ||
5794           (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
5795         ABIArgInfo ArgInfo =
5796             ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
5797         ArgInfo.setInReg(true);
5798         return ArgInfo;
5799       }
5800     }
5801 
5802     return ABIArgInfo::getIndirect(0);
5803   }
5804 
5805   // Treat an enum type as its underlying type.
5806   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5807     RetTy = EnumTy->getDecl()->getIntegerType();
5808 
5809   return (RetTy->isPromotableIntegerType() ?
5810           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5811 }
5812 
computeInfo(CGFunctionInfo & FI) const5813 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
5814   ABIArgInfo &RetInfo = FI.getReturnInfo();
5815   if (!getCXXABI().classifyReturnType(FI))
5816     RetInfo = classifyReturnType(FI.getReturnType());
5817 
5818   // Check if a pointer to an aggregate is passed as a hidden argument.
5819   uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
5820 
5821   for (auto &I : FI.arguments())
5822     I.info = classifyArgumentType(I.type, Offset);
5823 }
5824 
// Emit a va_arg access for MIPS: align the cursor to the argument's
// (clamped) alignment if needed, read the value there, and bump the
// cursor past the slot-rounded argument size.
llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
  // Pointers are also promoted in the same way but this only matters for N32.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  if ((Ty->isIntegerType() &&
          CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    // Re-read the narrow value as a full slot-width integer of the same
    // signedness.
    Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                                Ty->isSignedIntegerType());
  }

  // Load the current argument cursor from the va_list (an i8*).
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  int64_t TypeAlign =
      std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;
  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    // Over-aligned argument: round the cursor up via
    // (addr + align - 1) & -align, done in integer arithmetic.
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance the cursor past this argument (size rounded up to a slot) and
  // store it back into the va_list.
  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
  uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
5872 
5873 bool
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const5874 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5875                                                llvm::Value *Address) const {
5876   // This information comes from gcc's implementation, which seems to
5877   // as canonical as it gets.
5878 
5879   // Everything on MIPS is 4 bytes.  Double-precision FP registers
5880   // are aliased to pairs of single-precision FP registers.
5881   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5882 
5883   // 0-31 are the general purpose registers, $0 - $31.
5884   // 32-63 are the floating-point registers, $f0 - $f31.
5885   // 64 and 65 are the multiply/divide registers, $hi and $lo.
5886   // 66 is the (notional, I think) register for signal-handler return.
5887   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5888 
5889   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5890   // They are one bit wide and ignored here.
5891 
5892   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5893   // (coprocessor 1 is the FP unit)
5894   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5895   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5896   // 176-181 are the DSP accumulator registers.
5897   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
5898   return false;
5899 }
5900 
5901 //===----------------------------------------------------------------------===//
5902 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5903 // Currently subclassed only to implement custom OpenCL C function attribute
5904 // handling.
5905 //===----------------------------------------------------------------------===//
5906 
5907 namespace {
5908 
// TCE target hooks.  Only overrides SetTargetAttributes, which translates
// OpenCL kernel attributes into LLVM function attributes and metadata;
// everything else uses the default ABI behavior.
class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  // Applies OpenCL-specific attributes/metadata to function definitions.
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
5917 
SetTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & M) const5918 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5919                                                llvm::GlobalValue *GV,
5920                                                CodeGen::CodeGenModule &M) const {
5921   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5922   if (!FD) return;
5923 
5924   llvm::Function *F = cast<llvm::Function>(GV);
5925 
5926   if (M.getLangOpts().OpenCL) {
5927     if (FD->hasAttr<OpenCLKernelAttr>()) {
5928       // OpenCL C Kernel functions are not subject to inlining
5929       F->addFnAttr(llvm::Attribute::NoInline);
5930       const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
5931       if (Attr) {
5932         // Convert the reqd_work_group_size() attributes to metadata.
5933         llvm::LLVMContext &Context = F->getContext();
5934         llvm::NamedMDNode *OpenCLMetadata =
5935             M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
5936 
5937         SmallVector<llvm::Metadata *, 5> Operands;
5938         Operands.push_back(llvm::ConstantAsMetadata::get(F));
5939 
5940         Operands.push_back(
5941             llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
5942                 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
5943         Operands.push_back(
5944             llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
5945                 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
5946         Operands.push_back(
5947             llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
5948                 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
5949 
5950         // Add a boolean constant operand for "required" (true) or "hint" (false)
5951         // for implementing the work_group_size_hint attr later. Currently
5952         // always true as the hint is not yet implemented.
5953         Operands.push_back(
5954             llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
5955         OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
5956       }
5957     }
5958   }
5959 }
5960 
5961 }
5962 
5963 //===----------------------------------------------------------------------===//
5964 // Hexagon ABI Implementation
5965 //===----------------------------------------------------------------------===//
5966 
5967 namespace {
5968 
// ABI classification for Hexagon.  Scalars are passed directly (promotable
// integers extended); small aggregates are coerced to integers and large
// ones passed/returned indirectly.
class HexagonABIInfo : public ABIInfo {


public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:

  // Classify the function return value.
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  // Classify a single function argument.
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  // Emit code to pull one argument of type Ty off a va_list.
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
5985 
// Target hooks for Hexagon; wires up HexagonABIInfo and reports the DWARF
// register number of the stack pointer.
class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // The stack pointer is register 29 (r29) in the DWARF numbering.
    return 29;
  }
};
5995 
5996 }
5997 
computeInfo(CGFunctionInfo & FI) const5998 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
5999   if (!getCXXABI().classifyReturnType(FI))
6000     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6001   for (auto &I : FI.arguments())
6002     I.info = classifyArgumentType(I.type);
6003 }
6004 
classifyArgumentType(QualType Ty) const6005 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
6006   if (!isAggregateTypeForABI(Ty)) {
6007     // Treat an enum type as its underlying type.
6008     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6009       Ty = EnumTy->getDecl()->getIntegerType();
6010 
6011     return (Ty->isPromotableIntegerType() ?
6012             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6013   }
6014 
6015   // Ignore empty records.
6016   if (isEmptyRecord(getContext(), Ty, true))
6017     return ABIArgInfo::getIgnore();
6018 
6019   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6020     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
6021 
6022   uint64_t Size = getContext().getTypeSize(Ty);
6023   if (Size > 64)
6024     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
6025     // Pass in the smallest viable integer type.
6026   else if (Size > 32)
6027       return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6028   else if (Size > 16)
6029       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6030   else if (Size > 8)
6031       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6032   else
6033       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6034 }
6035 
classifyReturnType(QualType RetTy) const6036 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
6037   if (RetTy->isVoidType())
6038     return ABIArgInfo::getIgnore();
6039 
6040   // Large vector types should be returned via memory.
6041   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
6042     return ABIArgInfo::getIndirect(0);
6043 
6044   if (!isAggregateTypeForABI(RetTy)) {
6045     // Treat an enum type as its underlying type.
6046     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6047       RetTy = EnumTy->getDecl()->getIntegerType();
6048 
6049     return (RetTy->isPromotableIntegerType() ?
6050             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6051   }
6052 
6053   if (isEmptyRecord(getContext(), RetTy, true))
6054     return ABIArgInfo::getIgnore();
6055 
6056   // Aggregates <= 8 bytes are returned in r0; other aggregates
6057   // are returned indirectly.
6058   uint64_t Size = getContext().getTypeSize(RetTy);
6059   if (Size <= 64) {
6060     // Return in the smallest viable integer type.
6061     if (Size <= 8)
6062       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6063     if (Size <= 16)
6064       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6065     if (Size <= 32)
6066       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6067     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6068   }
6069 
6070   return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
6071 }
6072 
EmitVAArg(llvm::Value * VAListAddr,QualType Ty,CodeGenFunction & CGF) const6073 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6074                                        CodeGenFunction &CGF) const {
6075   // FIXME: Need to handle alignment
6076   llvm::Type *BPP = CGF.Int8PtrPtrTy;
6077 
6078   CGBuilderTy &Builder = CGF.Builder;
6079   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
6080                                                        "ap");
6081   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
6082   llvm::Type *PTy =
6083     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
6084   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
6085 
6086   uint64_t Offset =
6087     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
6088   llvm::Value *NextAddr =
6089     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
6090                       "ap.next");
6091   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
6092 
6093   return AddrTyped;
6094 }
6095 
6096 //===----------------------------------------------------------------------===//
6097 // AMDGPU ABI Implementation
6098 //===----------------------------------------------------------------------===//
6099 
6100 namespace {
6101 
// AMDGPU target hooks.  Uses the default ABI; only forwards the
// amdgpu_num_vgpr / amdgpu_num_sgpr declaration attributes to the backend.
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
6109 
6110 }
6111 
SetTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & M) const6112 void AMDGPUTargetCodeGenInfo::SetTargetAttributes(
6113   const Decl *D,
6114   llvm::GlobalValue *GV,
6115   CodeGen::CodeGenModule &M) const {
6116   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
6117   if (!FD)
6118     return;
6119 
6120   if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
6121     llvm::Function *F = cast<llvm::Function>(GV);
6122     uint32_t NumVGPR = Attr->getNumVGPR();
6123     if (NumVGPR != 0)
6124       F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6125   }
6126 
6127   if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
6128     llvm::Function *F = cast<llvm::Function>(GV);
6129     unsigned NumSGPR = Attr->getNumSGPR();
6130     if (NumSGPR != 0)
6131       F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6132   }
6133 }
6134 
6135 
6136 //===----------------------------------------------------------------------===//
6137 // SPARC v9 ABI Implementation.
6138 // Based on the SPARC Compliance Definition version 2.4.1.
6139 //
// Function arguments are mapped to a nominal "parameter array" and promoted to
6141 // registers depending on their type. Each argument occupies 8 or 16 bytes in
6142 // the array, structs larger than 16 bytes are passed indirectly.
6143 //
6144 // One case requires special care:
6145 //
6146 //   struct mixed {
6147 //     int i;
6148 //     float f;
6149 //   };
6150 //
6151 // When a struct mixed is passed by value, it only occupies 8 bytes in the
6152 // parameter array, but the int is passed in an integer register, and the float
6153 // is passed in a floating point register. This is represented as two arguments
6154 // with the LLVM IR inreg attribute:
6155 //
6156 //   declare void f(i32 inreg %i, float inreg %f)
6157 //
6158 // The code generator will only allocate 4 bytes from the parameter array for
6159 // the inreg arguments. All other arguments are allocated a multiple of 8
6160 // bytes.
6161 //
6162 namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  // Shared classification for arguments and return values.  SizeLimit is
  // the largest size (in bits) allowed to travel in registers; anything
  // bigger is passed/returned indirectly.
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems; // Elements of the coercion struct.
    uint64_t Size;                     // Bits covered by Elems so far.
    bool InReg;                        // True once a float < 64 bits is seen.

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          // Nested structs are flattened recursively.
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          // Only 64-bit-aligned pointers are kept as pointer elements.
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          // Other element kinds are skipped here; a later pad() call fills
          // the space they occupy with integers.
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      if (Ty->getNumElements() != Elems.size())
        return false;
      for (unsigned i = 0, e = Elems.size(); i != e; ++i)
        if (Elems[i] != Ty->getElementType(i))
          return false;
      return true;
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      else
        return llvm::StructType::get(Context, Elems);
    }
  };
};
6284 } // end anonymous namespace
6285 
6286 ABIArgInfo
classifyType(QualType Ty,unsigned SizeLimit) const6287 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
6288   if (Ty->isVoidType())
6289     return ABIArgInfo::getIgnore();
6290 
6291   uint64_t Size = getContext().getTypeSize(Ty);
6292 
6293   // Anything too big to fit in registers is passed with an explicit indirect
6294   // pointer / sret pointer.
6295   if (Size > SizeLimit)
6296     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
6297 
6298   // Treat an enum type as its underlying type.
6299   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6300     Ty = EnumTy->getDecl()->getIntegerType();
6301 
6302   // Integer types smaller than a register are extended.
6303   if (Size < 64 && Ty->isIntegerType())
6304     return ABIArgInfo::getExtend();
6305 
6306   // Other non-aggregates go in registers.
6307   if (!isAggregateTypeForABI(Ty))
6308     return ABIArgInfo::getDirect();
6309 
6310   // If a C++ object has either a non-trivial copy constructor or a non-trivial
6311   // destructor, it is passed with an explicit indirect pointer / sret pointer.
6312   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6313     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
6314 
6315   // This is a small aggregate type that should be passed in registers.
6316   // Build a coercion type from the LLVM struct type.
6317   llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
6318   if (!StrTy)
6319     return ABIArgInfo::getDirect();
6320 
6321   CoerceBuilder CB(getVMContext(), getDataLayout());
6322   CB.addStruct(0, StrTy);
6323   CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
6324 
6325   // Try to use the original type for coercion.
6326   llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
6327 
6328   if (CB.InReg)
6329     return ABIArgInfo::getDirectInReg(CoerceTy);
6330   else
6331     return ABIArgInfo::getDirect(CoerceTy);
6332 }
6333 
EmitVAArg(llvm::Value * VAListAddr,QualType Ty,CodeGenFunction & CGF) const6334 llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6335                                        CodeGenFunction &CGF) const {
6336   ABIArgInfo AI = classifyType(Ty, 16 * 8);
6337   llvm::Type *ArgTy = CGT.ConvertType(Ty);
6338   if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6339     AI.setCoerceToType(ArgTy);
6340 
6341   llvm::Type *BPP = CGF.Int8PtrPtrTy;
6342   CGBuilderTy &Builder = CGF.Builder;
6343   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
6344   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
6345   llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6346   llvm::Value *ArgAddr;
6347   unsigned Stride;
6348 
6349   switch (AI.getKind()) {
6350   case ABIArgInfo::Expand:
6351   case ABIArgInfo::InAlloca:
6352     llvm_unreachable("Unsupported ABI kind for va_arg");
6353 
6354   case ABIArgInfo::Extend:
6355     Stride = 8;
6356     ArgAddr = Builder
6357       .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
6358                           "extend");
6359     break;
6360 
6361   case ABIArgInfo::Direct:
6362     Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
6363     ArgAddr = Addr;
6364     break;
6365 
6366   case ABIArgInfo::Indirect:
6367     Stride = 8;
6368     ArgAddr = Builder.CreateBitCast(Addr,
6369                                     llvm::PointerType::getUnqual(ArgPtrTy),
6370                                     "indirect");
6371     ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
6372     break;
6373 
6374   case ABIArgInfo::Ignore:
6375     return llvm::UndefValue::get(ArgPtrTy);
6376   }
6377 
6378   // Update VAList.
6379   Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
6380   Builder.CreateStore(Addr, VAListAddrAsBPP);
6381 
6382   return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
6383 }
6384 
computeInfo(CGFunctionInfo & FI) const6385 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6386   FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
6387   for (auto &I : FI.arguments())
6388     I.info = classifyType(I.type, 16 * 8);
6389 }
6390 
6391 namespace {
// Target hooks for SPARC v9; wires up SparcV9ABIInfo and supplies the
// DWARF EH register information.
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // The stack pointer is DWARF register 14 (%o6).
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
6404 } // end anonymous namespace
6405 
6406 bool
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const6407 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6408                                                 llvm::Value *Address) const {
6409   // This is calculated from the LLVM and GCC tables and verified
6410   // against gcc output.  AFAIK all ABIs use the same encoding.
6411 
6412   CodeGen::CGBuilderTy &Builder = CGF.Builder;
6413 
6414   llvm::IntegerType *i8 = CGF.Int8Ty;
6415   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6416   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6417 
6418   // 0-31: the 8-byte general-purpose registers
6419   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
6420 
6421   // 32-63: f0-31, the 4-byte floating-point registers
6422   AssignToArrayRange(Builder, Address, Four8, 32, 63);
6423 
6424   //   Y   = 64
6425   //   PSR = 65
6426   //   WIM = 66
6427   //   TBR = 67
6428   //   PC  = 68
6429   //   NPC = 69
6430   //   FSR = 70
6431   //   CSR = 71
6432   AssignToArrayRange(Builder, Address, Eight8, 64, 71);
6433 
6434   // 72-87: d0-15, the 8-byte floating-point registers
6435   AssignToArrayRange(Builder, Address, Eight8, 72, 87);
6436 
6437   return false;
6438 }
6439 
6440 
6441 //===----------------------------------------------------------------------===//
6442 // XCore ABI Implementation
6443 //===----------------------------------------------------------------------===//
6444 
6445 namespace {
6446 
6447 /// A SmallStringEnc instance is used to build up the TypeString by passing
6448 /// it by reference between functions that append to it.
6449 typedef llvm::SmallString<128> SmallStringEnc;
6450 
6451 /// TypeStringCache caches the meta encodings of Types.
6452 ///
6453 /// The reason for caching TypeStrings is two fold:
6454 ///   1. To cache a type's encoding for later uses;
6455 ///   2. As a means to break recursive member type inclusion.
6456 ///
6457 /// A cache Entry can have a Status of:
6458 ///   NonRecursive:   The type encoding is not recursive;
6459 ///   Recursive:      The type encoding is recursive;
6460 ///   Incomplete:     An incomplete TypeString;
6461 ///   IncompleteUsed: An incomplete TypeString that has been used in a
6462 ///                   Recursive type encoding.
6463 ///
6464 /// A NonRecursive entry will have all of its sub-members expanded as fully
6465 /// as possible. Whilst it may contain types which are recursive, the type
6466 /// itself is not recursive and thus its encoding may be safely used whenever
6467 /// the type is encountered.
6468 ///
6469 /// A Recursive entry will have all of its sub-members expanded as fully as
6470 /// possible. The type itself is recursive and it may contain other types which
6471 /// are recursive. The Recursive encoding must not be used during the expansion
6472 /// of a recursive type's recursive branch. For simplicity the code uses
6473 /// IncompleteCount to reject all usage of Recursive encodings for member types.
6474 ///
6475 /// An Incomplete entry is always a RecordType and only encodes its
6476 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
6477 /// are placed into the cache during type expansion as a means to identify and
6478 /// handle recursive inclusion of types as sub-members. If there is recursion
6479 /// the entry becomes IncompleteUsed.
6480 ///
6481 /// During the expansion of a RecordType's members:
6482 ///
6483 ///   If the cache contains a NonRecursive encoding for the member type, the
6484 ///   cached encoding is used;
6485 ///
6486 ///   If the cache contains a Recursive encoding for the member type, the
6487 ///   cached encoding is 'Swapped' out, as it may be incorrect, and...
6488 ///
6489 ///   If the member is a RecordType, an Incomplete encoding is placed into the
6490 ///   cache to break potential recursive inclusion of itself as a sub-member;
6491 ///
6492 ///   Once a member RecordType has been expanded, its temporary incomplete
6493 ///   entry is removed from the cache. If a Recursive encoding was swapped out
6494 ///   it is swapped back in;
6495 ///
6496 ///   If an incomplete entry is used to expand a sub-member, the incomplete
6497 ///   entry is marked as IncompleteUsed. The cache keeps count of how many
6498 ///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
6499 ///
6500 ///   If a member's encoding is found to be a NonRecursive or Recursive viz:
6501 ///   IncompleteUsedCount==0, the member's encoding is added to the cache.
6502 ///   Else the member is part of a recursive type and thus the recursion has
6503 ///   been exited too soon for the encoding to be correct for the member.
6504 ///
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {};
  // Install an Incomplete stub encoding for ID, swapping out any existing
  // Recursive encoding so removeIncomplete() can restore it.
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  // Remove the Incomplete entry for ID (restoring a swapped-out Recursive
  // encoding, if any).  Returns true if the stub was used, i.e. the type
  // was defined recursively.
  bool removeIncomplete(const IdentifierInfo *ID);
  // Cache Str for ID when the encoding is complete (see file comment above).
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  // Look up any cached encoding for ID.
  StringRef lookupStr(const IdentifierInfo *ID);
};
6524 
/// TypeString encodings for enum & union fields must be ordered.
6526 /// FieldEncoding is a helper for this ordering process.
6527 class FieldEncoding {
6528   bool HasName;
6529   std::string Enc;
6530 public:
FieldEncoding(bool b,SmallStringEnc & e)6531   FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {};
str()6532   StringRef str() {return Enc.c_str();};
operator <(const FieldEncoding & rhs) const6533   bool operator<(const FieldEncoding &rhs) const {
6534     if (HasName != rhs.HasName) return HasName;
6535     return Enc < rhs.Enc;
6536   }
6537 };
6538 
// XCore uses the default ABI classification; only va_arg emission is
// customized.
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  // Emit code to pull one argument of type Ty off a va_list.
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
6545 
// XCore target hooks; emits per-symbol "TypeString" metadata via
// emitTargetMD, using TSC to cache encodings across declarations.
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC; // mutable: emitTargetMD is const but caches.
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};
6554 
6555 } // End anonymous namespace.
6556 
EmitVAArg(llvm::Value * VAListAddr,QualType Ty,CodeGenFunction & CGF) const6557 llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6558                                      CodeGenFunction &CGF) const {
6559   CGBuilderTy &Builder = CGF.Builder;
6560 
6561   // Get the VAList.
6562   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
6563                                                        CGF.Int8PtrPtrTy);
6564   llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
6565 
6566   // Handle the argument.
6567   ABIArgInfo AI = classifyArgumentType(Ty);
6568   llvm::Type *ArgTy = CGT.ConvertType(Ty);
6569   if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6570     AI.setCoerceToType(ArgTy);
6571   llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6572   llvm::Value *Val;
6573   uint64_t ArgSize = 0;
6574   switch (AI.getKind()) {
6575   case ABIArgInfo::Expand:
6576   case ABIArgInfo::InAlloca:
6577     llvm_unreachable("Unsupported ABI kind for va_arg");
6578   case ABIArgInfo::Ignore:
6579     Val = llvm::UndefValue::get(ArgPtrTy);
6580     ArgSize = 0;
6581     break;
6582   case ABIArgInfo::Extend:
6583   case ABIArgInfo::Direct:
6584     Val = Builder.CreatePointerCast(AP, ArgPtrTy);
6585     ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
6586     if (ArgSize < 4)
6587       ArgSize = 4;
6588     break;
6589   case ABIArgInfo::Indirect:
6590     llvm::Value *ArgAddr;
6591     ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
6592     ArgAddr = Builder.CreateLoad(ArgAddr);
6593     Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
6594     ArgSize = 4;
6595     break;
6596   }
6597 
6598   // Increment the VAList.
6599   if (ArgSize) {
6600     llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
6601     Builder.CreateStore(APN, VAListAddrAsBPP);
6602   }
6603   return Val;
6604 }
6605 
6606 /// During the expansion of a RecordType, an incomplete TypeString is placed
6607 /// into the cache as a means to identify and break recursion.
6608 /// If there is a Recursive encoding in the cache, it is swapped out and will
6609 /// be reinserted by removeIncomplete().
6610 /// All other types of encoding should have been used rather than arriving here.
addIncomplete(const IdentifierInfo * ID,std::string StubEnc)6611 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
6612                                     std::string StubEnc) {
6613   if (!ID)
6614     return;
6615   Entry &E = Map[ID];
6616   assert( (E.Str.empty() || E.State == Recursive) &&
6617          "Incorrectly use of addIncomplete");
6618   assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
6619   E.Swapped.swap(E.Str); // swap out the Recursive
6620   E.Str.swap(StubEnc);
6621   E.State = Incomplete;
6622   ++IncompleteCount;
6623 }
6624 
6625 /// Once the RecordType has been expanded, the temporary incomplete TypeString
6626 /// must be removed from the cache.
6627 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
6628 /// Returns true if the RecordType was defined recursively.
removeIncomplete(const IdentifierInfo * ID)6629 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
6630   if (!ID)
6631     return false;
6632   auto I = Map.find(ID);
6633   assert(I != Map.end() && "Entry not present");
6634   Entry &E = I->second;
6635   assert( (E.State == Incomplete ||
6636            E.State == IncompleteUsed) &&
6637          "Entry must be an incomplete type");
6638   bool IsRecursive = false;
6639   if (E.State == IncompleteUsed) {
6640     // We made use of our Incomplete encoding, thus we are recursive.
6641     IsRecursive = true;
6642     --IncompleteUsedCount;
6643   }
6644   if (E.Swapped.empty())
6645     Map.erase(I);
6646   else {
6647     // Swap the Recursive back.
6648     E.Swapped.swap(E.Str);
6649     E.Swapped.clear();
6650     E.State = Recursive;
6651   }
6652   --IncompleteCount;
6653   return IsRecursive;
6654 }
6655 
6656 /// Add the encoded TypeString to the cache only if it is NonRecursive or
6657 /// Recursive (viz: all sub-members were expanded as fully as possible).
addIfComplete(const IdentifierInfo * ID,StringRef Str,bool IsRecursive)6658 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
6659                                     bool IsRecursive) {
6660   if (!ID || IncompleteUsedCount)
6661     return; // No key or it is is an incomplete sub-type so don't add.
6662   Entry &E = Map[ID];
6663   if (IsRecursive && !E.Str.empty()) {
6664     assert(E.State==Recursive && E.Str.size() == Str.size() &&
6665            "This is not the same Recursive entry");
6666     // The parent container was not recursive after all, so we could have used
6667     // this Recursive sub-member entry after all, but we assumed the worse when
6668     // we started viz: IncompleteCount!=0.
6669     return;
6670   }
6671   assert(E.Str.empty() && "Entry already present");
6672   E.Str = Str.str();
6673   E.State = IsRecursive? Recursive : NonRecursive;
6674 }
6675 
6676 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
6677 /// are recursively expanding a type (IncompleteCount != 0) and the cached
6678 /// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef();   // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef();   // We have no encoding.
  Entry &E = I->second;
  // While a RecordType expansion is in flight (IncompleteCount != 0), a
  // Recursive encoding must not be embedded in a member position.
  if (E.State == Recursive && IncompleteCount)
    return StringRef();   // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    // removeIncomplete() reads this state to detect that recursion occurred.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  // NOTE(review): returns via c_str(), so StringRef length is re-derived by
  // strlen — presumably to guarantee a null-terminated buffer; confirm.
  return E.Str.c_str();
}
6696 
6697 /// The XCore ABI includes a type information section that communicates symbol
6698 /// type information to the linker. The linker uses this information to verify
6699 /// safety/correctness of things such as array bound and pointers et al.
6700 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
6701 /// This type information (TypeString) is emitted into meta data for all global
6702 /// symbols: definitions, declarations, functions & variables.
6703 ///
6704 /// The TypeString carries type, qualifier, name, size & value details.
6705 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
6706 /// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf>
6707 /// The output is tested by test/CodeGen/xcore-stringtype.c.
6708 ///
6709 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6710                           CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
6711 
6712 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
emitTargetMD(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM) const6713 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6714                                           CodeGen::CodeGenModule &CGM) const {
6715   SmallStringEnc Enc;
6716   if (getTypeString(Enc, D, CGM, TSC)) {
6717     llvm::LLVMContext &Ctx = CGM.getModule().getContext();
6718     llvm::SmallVector<llvm::Metadata *, 2> MDVals;
6719     MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
6720     MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
6721     llvm::NamedMDNode *MD =
6722       CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
6723     MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6724   }
6725 }
6726 
6727 static bool appendType(SmallStringEnc &Enc, QualType QType,
6728                        const CodeGen::CodeGenModule &CGM,
6729                        TypeStringCache &TSC);
6730 
6731 /// Helper function for appendRecordType().
6732 /// Builds a SmallVector containing the encoded field types in declaration order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    // Each field encodes as "m(<name>){<type>}"; bit-fields wrap the type as
    // "b(<width>:<type>)".
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      // Enc was appended to directly above, so the stream must re-sync with
      // the underlying SmallString before writing.
      OS.resync();
      OS << Field->getBitWidthValue(CGM.getContext());
      // Flush the stream's buffer into Enc before appending directly again.
      OS.flush();
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false; // Unencodable field type fails the whole record.
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    // The bool records whether the field is named; FieldEncoding::operator<
    // (referenced by appendRecordType's union sort) uses it for ordering.
    FE.push_back(FieldEncoding(!Field->getName().empty(), Enc));
  }
  return true;
}
6759 
6760 /// Appends structure and union types to Enc and adds encoding to cache.
6761 /// Recursively calls appendType (via extractFieldType) for each field.
6762 /// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString: "s(<name>){" or "u(<name>){".
  size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}';  // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      // A field failed to encode: drop the stub and propagate the failure.
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    // removeIncomplete() reports whether the stub was consumed, i.e. whether
    // this RecordType referred back to itself.
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  // Cache the finished encoding; addIfComplete() itself refuses to cache
  // while an enclosing expansion still depends on an incomplete stub.
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
6813 
6814 /// Appends enum types to Enc and adds the encoding to the cache.
appendEnumType(SmallStringEnc & Enc,const EnumType * ET,TypeStringCache & TSC,const IdentifierInfo * ID)6815 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
6816                            TypeStringCache &TSC,
6817                            const IdentifierInfo *ID) {
6818   // Append the cached TypeString if we have one.
6819   StringRef TypeString = TSC.lookupStr(ID);
6820   if (!TypeString.empty()) {
6821     Enc += TypeString;
6822     return true;
6823   }
6824 
6825   size_t Start = Enc.size();
6826   Enc += "e(";
6827   if (ID)
6828     Enc += ID->getName();
6829   Enc += "){";
6830 
6831   // We collect all encoded enumerations and order them alphanumerically.
6832   if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
6833     SmallVector<FieldEncoding, 16> FE;
6834     for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
6835          ++I) {
6836       SmallStringEnc EnumEnc;
6837       EnumEnc += "m(";
6838       EnumEnc += I->getName();
6839       EnumEnc += "){";
6840       I->getInitVal().toString(EnumEnc);
6841       EnumEnc += '}';
6842       FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
6843     }
6844     std::sort(FE.begin(), FE.end());
6845     unsigned E = FE.size();
6846     for (unsigned I = 0; I != E; ++I) {
6847       if (I)
6848         Enc += ',';
6849       Enc += FE[I].str();
6850     }
6851   }
6852   Enc += '}';
6853   TSC.addIfComplete(ID, Enc.substr(Start), false);
6854   return true;
6855 }
6856 
6857 /// Appends type's qualifier to Enc.
6858 /// This is done prior to appending the type's encoding.
appendQualifier(SmallStringEnc & Enc,QualType QT)6859 static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
6860   // Qualifiers are emitted in alphabetical order.
6861   static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
6862   int Lookup = 0;
6863   if (QT.isConstQualified())
6864     Lookup += 1<<0;
6865   if (QT.isRestrictQualified())
6866     Lookup += 1<<1;
6867   if (QT.isVolatileQualified())
6868     Lookup += 1<<2;
6869   Enc += Table[Lookup];
6870 }
6871 
6872 /// Appends built-in types to Enc.
appendBuiltinType(SmallStringEnc & Enc,const BuiltinType * BT)6873 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
6874   const char *EncType;
6875   switch (BT->getKind()) {
6876     case BuiltinType::Void:
6877       EncType = "0";
6878       break;
6879     case BuiltinType::Bool:
6880       EncType = "b";
6881       break;
6882     case BuiltinType::Char_U:
6883       EncType = "uc";
6884       break;
6885     case BuiltinType::UChar:
6886       EncType = "uc";
6887       break;
6888     case BuiltinType::SChar:
6889       EncType = "sc";
6890       break;
6891     case BuiltinType::UShort:
6892       EncType = "us";
6893       break;
6894     case BuiltinType::Short:
6895       EncType = "ss";
6896       break;
6897     case BuiltinType::UInt:
6898       EncType = "ui";
6899       break;
6900     case BuiltinType::Int:
6901       EncType = "si";
6902       break;
6903     case BuiltinType::ULong:
6904       EncType = "ul";
6905       break;
6906     case BuiltinType::Long:
6907       EncType = "sl";
6908       break;
6909     case BuiltinType::ULongLong:
6910       EncType = "ull";
6911       break;
6912     case BuiltinType::LongLong:
6913       EncType = "sll";
6914       break;
6915     case BuiltinType::Float:
6916       EncType = "ft";
6917       break;
6918     case BuiltinType::Double:
6919       EncType = "d";
6920       break;
6921     case BuiltinType::LongDouble:
6922       EncType = "ld";
6923       break;
6924     default:
6925       return false;
6926   }
6927   Enc += EncType;
6928   return true;
6929 }
6930 
6931 /// Appends a pointer encoding to Enc before calling appendType for the pointee.
appendPointerType(SmallStringEnc & Enc,const PointerType * PT,const CodeGen::CodeGenModule & CGM,TypeStringCache & TSC)6932 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
6933                               const CodeGen::CodeGenModule &CGM,
6934                               TypeStringCache &TSC) {
6935   Enc += "p(";
6936   if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
6937     return false;
6938   Enc += ')';
6939   return true;
6940 }
6941 
6942 /// Appends array encoding to Enc before calling appendType for the element.
appendArrayType(SmallStringEnc & Enc,QualType QT,const ArrayType * AT,const CodeGen::CodeGenModule & CGM,TypeStringCache & TSC,StringRef NoSizeEnc)6943 static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
6944                             const ArrayType *AT,
6945                             const CodeGen::CodeGenModule &CGM,
6946                             TypeStringCache &TSC, StringRef NoSizeEnc) {
6947   if (AT->getSizeModifier() != ArrayType::Normal)
6948     return false;
6949   Enc += "a(";
6950   if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
6951     CAT->getSize().toStringUnsigned(Enc);
6952   else
6953     Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
6954   Enc += ':';
6955   // The Qualifiers should be attached to the type rather than the array.
6956   appendQualifier(Enc, QT);
6957   if (!appendType(Enc, AT->getElementType(), CGM, TSC))
6958     return false;
6959   Enc += ')';
6960   return true;
6961 }
6962 
6963 /// Appends a function encoding to Enc, calling appendType for the return type
6964 /// and the arguments.
appendFunctionType(SmallStringEnc & Enc,const FunctionType * FT,const CodeGen::CodeGenModule & CGM,TypeStringCache & TSC)6965 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
6966                              const CodeGen::CodeGenModule &CGM,
6967                              TypeStringCache &TSC) {
6968   Enc += "f{";
6969   if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
6970     return false;
6971   Enc += "}(";
6972   if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
6973     // N.B. we are only interested in the adjusted param types.
6974     auto I = FPT->param_type_begin();
6975     auto E = FPT->param_type_end();
6976     if (I != E) {
6977       do {
6978         if (!appendType(Enc, *I, CGM, TSC))
6979           return false;
6980         ++I;
6981         if (I != E)
6982           Enc += ',';
6983       } while (I != E);
6984       if (FPT->isVariadic())
6985         Enc += ",va";
6986     } else {
6987       if (FPT->isVariadic())
6988         Enc += "va";
6989       else
6990         Enc += '0';
6991     }
6992   }
6993   Enc += ')';
6994   return true;
6995 }
6996 
6997 /// Handles the type's qualifier before dispatching a call to handle specific
6998 /// type encodings.
appendType(SmallStringEnc & Enc,QualType QType,const CodeGen::CodeGenModule & CGM,TypeStringCache & TSC)6999 static bool appendType(SmallStringEnc &Enc, QualType QType,
7000                        const CodeGen::CodeGenModule &CGM,
7001                        TypeStringCache &TSC) {
7002 
7003   QualType QT = QType.getCanonicalType();
7004 
7005   if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
7006     // The Qualifiers should be attached to the type rather than the array.
7007     // Thus we don't call appendQualifier() here.
7008     return appendArrayType(Enc, QT, AT, CGM, TSC, "");
7009 
7010   appendQualifier(Enc, QT);
7011 
7012   if (const BuiltinType *BT = QT->getAs<BuiltinType>())
7013     return appendBuiltinType(Enc, BT);
7014 
7015   if (const PointerType *PT = QT->getAs<PointerType>())
7016     return appendPointerType(Enc, PT, CGM, TSC);
7017 
7018   if (const EnumType *ET = QT->getAs<EnumType>())
7019     return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
7020 
7021   if (const RecordType *RT = QT->getAsStructureType())
7022     return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
7023 
7024   if (const RecordType *RT = QT->getAsUnionType())
7025     return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
7026 
7027   if (const FunctionType *FT = QT->getAs<FunctionType>())
7028     return appendFunctionType(Enc, FT, CGM, TSC);
7029 
7030   return false;
7031 }
7032 
getTypeString(SmallStringEnc & Enc,const Decl * D,CodeGen::CodeGenModule & CGM,TypeStringCache & TSC)7033 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
7034                           CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
7035   if (!D)
7036     return false;
7037 
7038   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
7039     if (FD->getLanguageLinkage() != CLanguageLinkage)
7040       return false;
7041     return appendType(Enc, FD->getType(), CGM, TSC);
7042   }
7043 
7044   if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
7045     if (VD->getLanguageLinkage() != CLanguageLinkage)
7046       return false;
7047     QualType QT = VD->getType().getCanonicalType();
7048     if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
7049       // Global ArrayTypes are given a size of '*' if the size is unknown.
7050       // The Qualifiers should be attached to the type rather than the array.
7051       // Thus we don't call appendQualifier() here.
7052       return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
7053     }
7054     return appendType(Enc, QT, CGM, TSC);
7055   }
7056   return false;
7057 }
7058 
7059 
7060 //===----------------------------------------------------------------------===//
7061 // Driver code
7062 //===----------------------------------------------------------------------===//
7063 
getTriple() const7064 const llvm::Triple &CodeGenModule::getTriple() const {
7065   return getTarget().getTriple();
7066 }
7067 
supportsCOMDAT() const7068 bool CodeGenModule::supportsCOMDAT() const {
7069   return !getTriple().isOSBinFormatMachO();
7070 }
7071 
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  // Created lazily on first use and cached for the lifetime of the module.
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    // NOTE(review): the bool presumably selects the 32-bit (O32) ABI here
    // and the 64-bit ABI below -- confirm against the ctor declaration.
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    // AAPCS is the default; the "darwinpcs" ABI string selects Darwin's
    // variant.
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    {
      // Choose the ARM ABI kind: APCS for "apcs-gnu"; AAPCS-VFP when hard
      // float is requested explicitly, or implied by a gnueabihf environment
      // unless soft float was forced; plain AAPCS otherwise.
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      if (getTarget().getABI() == "apcs-gnu")
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      switch (Triple.getOS()) {
        case llvm::Triple::NaCl:
          return *(TheTargetCodeGenInfo =
                   new NaClARMTargetCodeGenInfo(Types, Kind));
        default:
          return *(TheTargetCodeGenInfo =
                   new ARMTargetCodeGenInfo(Types, Kind));
      }
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      // Big-endian PPC64 ELF defaults to ELFv1; "elfv2" may be requested.
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;

      return *(TheTargetCodeGenInfo =
               new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
    } else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    // Little-endian PPC64 defaults to ELFv2; "elfv1" may be requested.
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;

    return *(TheTargetCodeGenInfo =
             new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo =
               new WinX86_32TargetCodeGenInfo(Types,
                                              IsDarwinVectorABI, IsSmallStructInRegABI,
                                              IsWin32FloatStructABI,
                                              CodeGenOpts.NumRegisterParameters));
    } else {
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types,
                                           IsDarwinVectorABI, IsSmallStructInRegABI,
                                           IsWin32FloatStructABI,
                                           CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    // The "avx" ABI string changes vector-argument passing.
    bool HasAVX = getTarget().getABI() == "avx";

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
                   new WinX86_64TargetCodeGenInfo(Types, HasAVX));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo =
                   new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
    default:
      return *(TheTargetCodeGenInfo =
                   new X86_64TargetCodeGenInfo(Types, HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}
7206